Vendor containerd 2386062 and runtime-tools e29f3ca.

Signed-off-by: Lantao Liu <lantaol@google.com>

parent 73748840da
commit a4f7f7127b

Godeps/Godeps.json (generated, 249 lines changed)
@@ -8,13 +8,13 @@
 	"Deps": [
 		{
 			"ImportPath": "github.com/Microsoft/go-winio",
-			"Comment": "v0.4.1",
-			"Rev": "706941bedd2d9b3a8c88e4022bd0078101f233f2"
+			"Comment": "v0.4.4",
+			"Rev": "7ff89941bcb93df2e962467fb073c6e997b13cf0"
 		},
 		{
 			"ImportPath": "github.com/Sirupsen/logrus",
-			"Comment": "v0.11.0",
-			"Rev": "d26492970760ca5d33129d2d799e34be5c4782eb"
+			"Comment": "v1.0.0",
+			"Rev": "202f25545ea4cf9b191ff7f846df5d87c9382c2b"
 		},
 		{
 			"ImportPath": "github.com/blang/semver",
@@ -28,157 +28,201 @@
 		},
 		{
 			"ImportPath": "github.com/containerd/containerd",
-			"Comment": "v0.2.3-1098-g8ed1e24",
-			"Rev": "8ed1e24ae925b5c6d8195858ee89dddb0507d65f"
+			"Comment": "v1.0.0-alpha2-24-g2386062",
+			"Rev": "2386062ce152d6f158d22be5991fe11c7cf67535"
 		},
 		{
-			"ImportPath": "github.com/containerd/containerd/api/services/containers",
-			"Comment": "v0.2.3-1098-g8ed1e24",
-			"Rev": "8ed1e24ae925b5c6d8195858ee89dddb0507d65f"
+			"ImportPath": "github.com/containerd/containerd/api/services/containers/v1",
+			"Comment": "v1.0.0-alpha2-24-g2386062",
+			"Rev": "2386062ce152d6f158d22be5991fe11c7cf67535"
 		},
 		{
-			"ImportPath": "github.com/containerd/containerd/api/services/content",
-			"Comment": "v0.2.3-1098-g8ed1e24",
-			"Rev": "8ed1e24ae925b5c6d8195858ee89dddb0507d65f"
+			"ImportPath": "github.com/containerd/containerd/api/services/content/v1",
+			"Comment": "v1.0.0-alpha2-24-g2386062",
+			"Rev": "2386062ce152d6f158d22be5991fe11c7cf67535"
 		},
 		{
-			"ImportPath": "github.com/containerd/containerd/api/services/diff",
-			"Comment": "v0.2.3-1098-g8ed1e24",
-			"Rev": "8ed1e24ae925b5c6d8195858ee89dddb0507d65f"
+			"ImportPath": "github.com/containerd/containerd/api/services/diff/v1",
+			"Comment": "v1.0.0-alpha2-24-g2386062",
+			"Rev": "2386062ce152d6f158d22be5991fe11c7cf67535"
 		},
 		{
-			"ImportPath": "github.com/containerd/containerd/api/services/execution",
-			"Comment": "v0.2.3-1098-g8ed1e24",
-			"Rev": "8ed1e24ae925b5c6d8195858ee89dddb0507d65f"
+			"ImportPath": "github.com/containerd/containerd/api/services/events/v1",
+			"Comment": "v1.0.0-alpha2-24-g2386062",
+			"Rev": "2386062ce152d6f158d22be5991fe11c7cf67535"
 		},
 		{
-			"ImportPath": "github.com/containerd/containerd/api/services/images",
-			"Comment": "v0.2.3-1098-g8ed1e24",
-			"Rev": "8ed1e24ae925b5c6d8195858ee89dddb0507d65f"
+			"ImportPath": "github.com/containerd/containerd/api/services/images/v1",
+			"Comment": "v1.0.0-alpha2-24-g2386062",
+			"Rev": "2386062ce152d6f158d22be5991fe11c7cf67535"
 		},
 		{
-			"ImportPath": "github.com/containerd/containerd/api/services/namespaces",
-			"Comment": "v0.2.3-1098-g8ed1e24",
-			"Rev": "8ed1e24ae925b5c6d8195858ee89dddb0507d65f"
+			"ImportPath": "github.com/containerd/containerd/api/services/namespaces/v1",
+			"Comment": "v1.0.0-alpha2-24-g2386062",
+			"Rev": "2386062ce152d6f158d22be5991fe11c7cf67535"
 		},
 		{
-			"ImportPath": "github.com/containerd/containerd/api/services/snapshot",
-			"Comment": "v0.2.3-1098-g8ed1e24",
-			"Rev": "8ed1e24ae925b5c6d8195858ee89dddb0507d65f"
+			"ImportPath": "github.com/containerd/containerd/api/services/snapshot/v1",
+			"Comment": "v1.0.0-alpha2-24-g2386062",
+			"Rev": "2386062ce152d6f158d22be5991fe11c7cf67535"
 		},
 		{
-			"ImportPath": "github.com/containerd/containerd/api/services/version",
-			"Comment": "v0.2.3-1098-g8ed1e24",
-			"Rev": "8ed1e24ae925b5c6d8195858ee89dddb0507d65f"
+			"ImportPath": "github.com/containerd/containerd/api/services/tasks/v1",
+			"Comment": "v1.0.0-alpha2-24-g2386062",
+			"Rev": "2386062ce152d6f158d22be5991fe11c7cf67535"
 		},
 		{
-			"ImportPath": "github.com/containerd/containerd/api/types/descriptor",
-			"Comment": "v0.2.3-1098-g8ed1e24",
-			"Rev": "8ed1e24ae925b5c6d8195858ee89dddb0507d65f"
+			"ImportPath": "github.com/containerd/containerd/api/services/version/v1",
+			"Comment": "v1.0.0-alpha2-24-g2386062",
+			"Rev": "2386062ce152d6f158d22be5991fe11c7cf67535"
 		},
 		{
-			"ImportPath": "github.com/containerd/containerd/api/types/mount",
-			"Comment": "v0.2.3-1098-g8ed1e24",
-			"Rev": "8ed1e24ae925b5c6d8195858ee89dddb0507d65f"
+			"ImportPath": "github.com/containerd/containerd/api/types",
+			"Comment": "v1.0.0-alpha2-24-g2386062",
+			"Rev": "2386062ce152d6f158d22be5991fe11c7cf67535"
 		},
 		{
 			"ImportPath": "github.com/containerd/containerd/api/types/task",
-			"Comment": "v0.2.3-1098-g8ed1e24",
-			"Rev": "8ed1e24ae925b5c6d8195858ee89dddb0507d65f"
+			"Comment": "v1.0.0-alpha2-24-g2386062",
+			"Rev": "2386062ce152d6f158d22be5991fe11c7cf67535"
 		},
 		{
 			"ImportPath": "github.com/containerd/containerd/containers",
-			"Comment": "v0.2.3-1098-g8ed1e24",
-			"Rev": "8ed1e24ae925b5c6d8195858ee89dddb0507d65f"
+			"Comment": "v1.0.0-alpha2-24-g2386062",
+			"Rev": "2386062ce152d6f158d22be5991fe11c7cf67535"
 		},
 		{
 			"ImportPath": "github.com/containerd/containerd/content",
-			"Comment": "v0.2.3-1098-g8ed1e24",
-			"Rev": "8ed1e24ae925b5c6d8195858ee89dddb0507d65f"
+			"Comment": "v1.0.0-alpha2-24-g2386062",
+			"Rev": "2386062ce152d6f158d22be5991fe11c7cf67535"
+		},
+		{
+			"ImportPath": "github.com/containerd/containerd/errdefs",
+			"Comment": "v1.0.0-alpha2-24-g2386062",
+			"Rev": "2386062ce152d6f158d22be5991fe11c7cf67535"
+		},
+		{
+			"ImportPath": "github.com/containerd/containerd/events",
+			"Comment": "v1.0.0-alpha2-24-g2386062",
+			"Rev": "2386062ce152d6f158d22be5991fe11c7cf67535"
+		},
+		{
+			"ImportPath": "github.com/containerd/containerd/filters",
+			"Comment": "v1.0.0-alpha2-24-g2386062",
+			"Rev": "2386062ce152d6f158d22be5991fe11c7cf67535"
+		},
+		{
+			"ImportPath": "github.com/containerd/containerd/fs",
+			"Comment": "v1.0.0-alpha2-24-g2386062",
+			"Rev": "2386062ce152d6f158d22be5991fe11c7cf67535"
+		},
+		{
+			"ImportPath": "github.com/containerd/containerd/identifiers",
+			"Comment": "v1.0.0-alpha2-24-g2386062",
+			"Rev": "2386062ce152d6f158d22be5991fe11c7cf67535"
 		},
 		{
 			"ImportPath": "github.com/containerd/containerd/images",
-			"Comment": "v0.2.3-1098-g8ed1e24",
-			"Rev": "8ed1e24ae925b5c6d8195858ee89dddb0507d65f"
+			"Comment": "v1.0.0-alpha2-24-g2386062",
+			"Rev": "2386062ce152d6f158d22be5991fe11c7cf67535"
+		},
+		{
+			"ImportPath": "github.com/containerd/containerd/linux/runcopts",
+			"Comment": "v1.0.0-alpha2-24-g2386062",
+			"Rev": "2386062ce152d6f158d22be5991fe11c7cf67535"
 		},
 		{
 			"ImportPath": "github.com/containerd/containerd/log",
-			"Comment": "v0.2.3-1098-g8ed1e24",
-			"Rev": "8ed1e24ae925b5c6d8195858ee89dddb0507d65f"
+			"Comment": "v1.0.0-alpha2-24-g2386062",
+			"Rev": "2386062ce152d6f158d22be5991fe11c7cf67535"
 		},
 		{
 			"ImportPath": "github.com/containerd/containerd/metadata",
-			"Comment": "v0.2.3-1098-g8ed1e24",
-			"Rev": "8ed1e24ae925b5c6d8195858ee89dddb0507d65f"
+			"Comment": "v1.0.0-alpha2-24-g2386062",
+			"Rev": "2386062ce152d6f158d22be5991fe11c7cf67535"
 		},
 		{
 			"ImportPath": "github.com/containerd/containerd/mount",
-			"Comment": "v0.2.3-1098-g8ed1e24",
-			"Rev": "8ed1e24ae925b5c6d8195858ee89dddb0507d65f"
+			"Comment": "v1.0.0-alpha2-24-g2386062",
+			"Rev": "2386062ce152d6f158d22be5991fe11c7cf67535"
 		},
 		{
 			"ImportPath": "github.com/containerd/containerd/namespaces",
-			"Comment": "v0.2.3-1098-g8ed1e24",
-			"Rev": "8ed1e24ae925b5c6d8195858ee89dddb0507d65f"
+			"Comment": "v1.0.0-alpha2-24-g2386062",
+			"Rev": "2386062ce152d6f158d22be5991fe11c7cf67535"
+		},
+		{
+			"ImportPath": "github.com/containerd/containerd/oci",
+			"Comment": "v1.0.0-alpha2-24-g2386062",
+			"Rev": "2386062ce152d6f158d22be5991fe11c7cf67535"
 		},
 		{
 			"ImportPath": "github.com/containerd/containerd/plugin",
-			"Comment": "v0.2.3-1098-g8ed1e24",
-			"Rev": "8ed1e24ae925b5c6d8195858ee89dddb0507d65f"
+			"Comment": "v1.0.0-alpha2-24-g2386062",
+			"Rev": "2386062ce152d6f158d22be5991fe11c7cf67535"
 		},
 		{
 			"ImportPath": "github.com/containerd/containerd/reference",
-			"Comment": "v0.2.3-1098-g8ed1e24",
-			"Rev": "8ed1e24ae925b5c6d8195858ee89dddb0507d65f"
+			"Comment": "v1.0.0-alpha2-24-g2386062",
+			"Rev": "2386062ce152d6f158d22be5991fe11c7cf67535"
 		},
 		{
 			"ImportPath": "github.com/containerd/containerd/remotes",
-			"Comment": "v0.2.3-1098-g8ed1e24",
-			"Rev": "8ed1e24ae925b5c6d8195858ee89dddb0507d65f"
+			"Comment": "v1.0.0-alpha2-24-g2386062",
+			"Rev": "2386062ce152d6f158d22be5991fe11c7cf67535"
 		},
 		{
 			"ImportPath": "github.com/containerd/containerd/remotes/docker",
-			"Comment": "v0.2.3-1098-g8ed1e24",
-			"Rev": "8ed1e24ae925b5c6d8195858ee89dddb0507d65f"
+			"Comment": "v1.0.0-alpha2-24-g2386062",
+			"Rev": "2386062ce152d6f158d22be5991fe11c7cf67535"
 		},
 		{
 			"ImportPath": "github.com/containerd/containerd/remotes/docker/schema1",
-			"Comment": "v0.2.3-1098-g8ed1e24",
-			"Rev": "8ed1e24ae925b5c6d8195858ee89dddb0507d65f"
+			"Comment": "v1.0.0-alpha2-24-g2386062",
+			"Rev": "2386062ce152d6f158d22be5991fe11c7cf67535"
 		},
 		{
 			"ImportPath": "github.com/containerd/containerd/rootfs",
-			"Comment": "v0.2.3-1098-g8ed1e24",
-			"Rev": "8ed1e24ae925b5c6d8195858ee89dddb0507d65f"
+			"Comment": "v1.0.0-alpha2-24-g2386062",
+			"Rev": "2386062ce152d6f158d22be5991fe11c7cf67535"
 		},
 		{
 			"ImportPath": "github.com/containerd/containerd/services/content",
-			"Comment": "v0.2.3-1098-g8ed1e24",
-			"Rev": "8ed1e24ae925b5c6d8195858ee89dddb0507d65f"
+			"Comment": "v1.0.0-alpha2-24-g2386062",
+			"Rev": "2386062ce152d6f158d22be5991fe11c7cf67535"
 		},
 		{
 			"ImportPath": "github.com/containerd/containerd/services/diff",
-			"Comment": "v0.2.3-1098-g8ed1e24",
-			"Rev": "8ed1e24ae925b5c6d8195858ee89dddb0507d65f"
+			"Comment": "v1.0.0-alpha2-24-g2386062",
+			"Rev": "2386062ce152d6f158d22be5991fe11c7cf67535"
 		},
 		{
 			"ImportPath": "github.com/containerd/containerd/services/images",
-			"Comment": "v0.2.3-1098-g8ed1e24",
-			"Rev": "8ed1e24ae925b5c6d8195858ee89dddb0507d65f"
+			"Comment": "v1.0.0-alpha2-24-g2386062",
+			"Rev": "2386062ce152d6f158d22be5991fe11c7cf67535"
 		},
 		{
 			"ImportPath": "github.com/containerd/containerd/services/snapshot",
-			"Comment": "v0.2.3-1098-g8ed1e24",
-			"Rev": "8ed1e24ae925b5c6d8195858ee89dddb0507d65f"
+			"Comment": "v1.0.0-alpha2-24-g2386062",
+			"Rev": "2386062ce152d6f158d22be5991fe11c7cf67535"
 		},
 		{
 			"ImportPath": "github.com/containerd/containerd/snapshot",
-			"Comment": "v0.2.3-1098-g8ed1e24",
-			"Rev": "8ed1e24ae925b5c6d8195858ee89dddb0507d65f"
+			"Comment": "v1.0.0-alpha2-24-g2386062",
+			"Rev": "2386062ce152d6f158d22be5991fe11c7cf67535"
+		},
+		{
+			"ImportPath": "github.com/containerd/containerd/typeurl",
+			"Comment": "v1.0.0-alpha2-24-g2386062",
+			"Rev": "2386062ce152d6f158d22be5991fe11c7cf67535"
+		},
+		{
+			"ImportPath": "github.com/containerd/continuity/sysx",
+			"Rev": "86cec1535a968310e7532819f699ff2830ed7463"
 		},
 		{
 			"ImportPath": "github.com/containerd/fifo",
-			"Rev": "69b99525e472735860a5269b75af1970142b3062"
+			"Rev": "fbfb6a11ec671efbe94ad1c12c2e98773f19e1e6"
 		},
 		{
 			"ImportPath": "github.com/containernetworking/cni/libcni",
@@ -236,9 +280,8 @@
 			"Rev": "092cba3727bb9b4a2f0e922cd6c0f93ea270e363"
 		},
 		{
-			"ImportPath": "github.com/docker/docker/pkg/truncindex",
-			"Comment": "v1.13.1",
-			"Rev": "092cba3727bb9b4a2f0e922cd6c0f93ea270e363"
+			"ImportPath": "github.com/docker/go-events",
+			"Rev": "9461782956ad83b30282bf90e31fa6a70c255ba9"
 		},
 		{
 			"ImportPath": "github.com/fsnotify/fsnotify",
@@ -280,15 +323,15 @@
 		},
 		{
 			"ImportPath": "github.com/golang/protobuf/proto",
-			"Rev": "7a211bcf3bce0e3f1d74f9894916e6f116ae83b4"
+			"Rev": "5a0f697c9ed9d68fef0116532c6e05cfeae00e55"
 		},
 		{
 			"ImportPath": "github.com/golang/protobuf/ptypes/any",
-			"Rev": "7a211bcf3bce0e3f1d74f9894916e6f116ae83b4"
+			"Rev": "5a0f697c9ed9d68fef0116532c6e05cfeae00e55"
 		},
 		{
 			"ImportPath": "github.com/golang/protobuf/ptypes/empty",
-			"Rev": "7a211bcf3bce0e3f1d74f9894916e6f116ae83b4"
+			"Rev": "5a0f697c9ed9d68fef0116532c6e05cfeae00e55"
 		},
 		{
 			"ImportPath": "github.com/jpillora/backoff",
@@ -305,45 +348,45 @@
 		},
 		{
 			"ImportPath": "github.com/opencontainers/image-spec/identity",
-			"Comment": "v1.0.0-rc6",
-			"Rev": "1a6593ab6c3ab5902072b4694a22ff19425396ae"
+			"Comment": "v1.0.0-rc6-12-g372ad78",
+			"Rev": "372ad780f63454fbbbbcc7cf80e5b90245c13e13"
 		},
 		{
 			"ImportPath": "github.com/opencontainers/image-spec/specs-go",
-			"Comment": "v1.0.0-rc6",
-			"Rev": "1a6593ab6c3ab5902072b4694a22ff19425396ae"
+			"Comment": "v1.0.0-rc6-12-g372ad78",
+			"Rev": "372ad780f63454fbbbbcc7cf80e5b90245c13e13"
 		},
 		{
 			"ImportPath": "github.com/opencontainers/image-spec/specs-go/v1",
-			"Comment": "v1.0.0-rc6",
-			"Rev": "1a6593ab6c3ab5902072b4694a22ff19425396ae"
+			"Comment": "v1.0.0-rc6-12-g372ad78",
+			"Rev": "372ad780f63454fbbbbcc7cf80e5b90245c13e13"
 		},
 		{
 			"ImportPath": "github.com/opencontainers/runc/libcontainer/configs",
-			"Comment": "v1.0.0-rc3-74-g6394544",
-			"Rev": "639454475cb9c8b861cc599f8bcd5c8c790ae402"
+			"Comment": "v1.0.0-rc3-161-ge775f0f",
+			"Rev": "e775f0fba3ea329b8b766451c892c41a3d49594d"
 		},
 		{
 			"ImportPath": "github.com/opencontainers/runc/libcontainer/devices",
-			"Comment": "v1.0.0-rc3-74-g6394544",
-			"Rev": "639454475cb9c8b861cc599f8bcd5c8c790ae402"
+			"Comment": "v1.0.0-rc3-161-ge775f0f",
+			"Rev": "e775f0fba3ea329b8b766451c892c41a3d49594d"
 		},
 		{
 			"ImportPath": "github.com/opencontainers/runtime-spec/specs-go",
-			"Comment": "v1.0.0-rc5",
-			"Rev": "035da1dca3dfbb00d752eb58b0b158d6129f3776"
+			"Comment": "v1.0.0",
+			"Rev": "02137cd4e50b37a01665e1731fcd4ac2d2178230"
 		},
 		{
 			"ImportPath": "github.com/opencontainers/runtime-tools/generate",
-			"Rev": "68c195c3f2fa04a9a298b839eb2d94f31141271a"
+			"Rev": "e29f3ca4eb806a582ee1a1864c7b0563bd64c19b"
 		},
 		{
 			"ImportPath": "github.com/opencontainers/runtime-tools/generate/seccomp",
-			"Rev": "68c195c3f2fa04a9a298b839eb2d94f31141271a"
+			"Rev": "e29f3ca4eb806a582ee1a1864c7b0563bd64c19b"
 		},
 		{
 			"ImportPath": "github.com/opencontainers/runtime-tools/validate",
-			"Rev": "68c195c3f2fa04a9a298b839eb2d94f31141271a"
+			"Rev": "e29f3ca4eb806a582ee1a1864c7b0563bd64c19b"
 		},
 		{
 			"ImportPath": "github.com/pkg/errors",
@@ -355,6 +398,11 @@
 			"Comment": "v1.0.0",
 			"Rev": "792786c7400a136282c1664665ae0a8db921c6c2"
 		},
+		{
+			"ImportPath": "github.com/sirupsen/logrus",
+			"Comment": "v1.0.0",
+			"Rev": "202f25545ea4cf9b191ff7f846df5d87c9382c2b"
+		},
 		{
 			"ImportPath": "github.com/spf13/pflag",
 			"Rev": "9ff6c6923cfffbcd502984b8e0c80539a94968b7"
@@ -373,11 +421,6 @@
 			"ImportPath": "github.com/syndtr/gocapability/capability",
 			"Rev": "e7cb7fa329f456b3855136a2642b197bad7366ba"
 		},
-		{
-			"ImportPath": "github.com/tchap/go-patricia/patricia",
-			"Comment": "v2.2.6",
-			"Rev": "666120de432aea38ab06bd5c818f04f4129882c9"
-		},
 		{
 			"ImportPath": "golang.org/x/net/context",
 			"Rev": "7dcfb8076726a3fdd9353b6b8a1f1b6be6811bd6"
@@ -416,11 +459,11 @@
 		},
 		{
 			"ImportPath": "golang.org/x/sys/unix",
-			"Rev": "f3918c30c5c2cb527c0b071a27c35120a6c0719a"
+			"Rev": "739734461d1c916b6c72a63d7efda2b27edb369f"
 		},
 		{
 			"ImportPath": "golang.org/x/sys/windows",
-			"Rev": "f3918c30c5c2cb527c0b071a27c35120a6c0719a"
+			"Rev": "739734461d1c916b6c72a63d7efda2b27edb369f"
 		},
 		{
 			"ImportPath": "golang.org/x/text/secure/bidirule",
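
Note on the Godeps changes above: every vendored github.com/containerd/containerd package moves from revision 8ed1e24 to 2386062 (v1.0.0-alpha2-24-g2386062), and the containerd gRPC API packages now carry versioned import paths (api/services/containers becomes api/services/containers/v1, api/services/execution is superseded by api/services/tasks/v1, api/types/descriptor and api/types/mount collapse into api/types, and so on). Code in this repository that imports the old paths has to be updated to match. A minimal, illustrative sketch of such an import update; the package name and blank-identifier declarations below are placeholders, not code from this commit:

```go
// Illustrative only: switching from the pre-bump containerd import paths to
// the versioned /v1 paths vendored by this commit.
package main

import (
	containers "github.com/containerd/containerd/api/services/containers/v1" // was .../api/services/containers
	tasks "github.com/containerd/containerd/api/services/tasks/v1"           // was .../api/services/execution
	"github.com/containerd/containerd/api/types"                             // was .../api/types/descriptor and .../api/types/mount
)

// Referencing one generated type from each package keeps the sketch compiling;
// actual client wiring against these services is out of scope here.
var (
	_ containers.ContainersClient
	_ tasks.TasksClient
	_ types.Descriptor
)

func main() {}
```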

vendor/github.com/Microsoft/go-winio/file.go (generated, vendored, 21 lines changed)

@@ -23,6 +23,13 @@ type atomicBool int32
 func (b *atomicBool) isSet() bool { return atomic.LoadInt32((*int32)(b)) != 0 }
 func (b *atomicBool) setFalse()   { atomic.StoreInt32((*int32)(b), 0) }
 func (b *atomicBool) setTrue()    { atomic.StoreInt32((*int32)(b), 1) }
+func (b *atomicBool) swap(new bool) bool {
+	var newInt int32
+	if new {
+		newInt = 1
+	}
+	return atomic.SwapInt32((*int32)(b), newInt) == 1
+}
 
 const (
 	cFILE_SKIP_COMPLETION_PORT_ON_SUCCESS = 1
@@ -71,7 +78,7 @@ func initIo() {
 type win32File struct {
 	handle        syscall.Handle
 	wg            sync.WaitGroup
-	closing       bool
+	closing       atomicBool
 	readDeadline  deadlineHandler
 	writeDeadline deadlineHandler
 }
@@ -107,9 +114,9 @@ func MakeOpenFile(h syscall.Handle) (io.ReadWriteCloser, error) {
 
 // closeHandle closes the resources associated with a Win32 handle
 func (f *win32File) closeHandle() {
-	if !f.closing {
+	// Atomically set that we are closing, releasing the resources only once.
+	if !f.closing.swap(true) {
 		// cancel all IO and wait for it to complete
-		f.closing = true
 		cancelIoEx(f.handle, nil)
 		f.wg.Wait()
 		// at this point, no new IO can start
@@ -127,10 +134,10 @@ func (f *win32File) Close() error {
 // prepareIo prepares for a new IO operation.
 // The caller must call f.wg.Done() when the IO is finished, prior to Close() returning.
 func (f *win32File) prepareIo() (*ioOperation, error) {
-	f.wg.Add(1)
-	if f.closing {
+	if f.closing.isSet() {
 		return nil, ErrFileClosed
 	}
+	f.wg.Add(1)
 	c := &ioOperation{}
 	c.ch = make(chan ioResult)
 	return c, nil
@@ -159,7 +166,7 @@ func (f *win32File) asyncIo(c *ioOperation, d *deadlineHandler, bytes uint32, er
 		return int(bytes), err
 	}
 
-	if f.closing {
+	if f.closing.isSet() {
 		cancelIoEx(f.handle, &c.o)
 	}
 
@@ -175,7 +182,7 @@ func (f *win32File) asyncIo(c *ioOperation, d *deadlineHandler, bytes uint32, er
 	case r = <-c.ch:
 		err = r.err
 		if err == syscall.ERROR_OPERATION_ABORTED {
-			if f.closing {
+			if f.closing.isSet() {
 				err = ErrFileClosed
 			}
 		}
15
vendor/github.com/Microsoft/go-winio/pipe.go
generated
vendored
15
vendor/github.com/Microsoft/go-winio/pipe.go
generated
vendored
@ -13,19 +13,12 @@ import (
|
|||||||
)
|
)
|
||||||
|
|
||||||
//sys connectNamedPipe(pipe syscall.Handle, o *syscall.Overlapped) (err error) = ConnectNamedPipe
|
//sys connectNamedPipe(pipe syscall.Handle, o *syscall.Overlapped) (err error) = ConnectNamedPipe
|
||||||
//sys createNamedPipe(name string, flags uint32, pipeMode uint32, maxInstances uint32, outSize uint32, inSize uint32, defaultTimeout uint32, sa *securityAttributes) (handle syscall.Handle, err error) [failretval==syscall.InvalidHandle] = CreateNamedPipeW
|
//sys createNamedPipe(name string, flags uint32, pipeMode uint32, maxInstances uint32, outSize uint32, inSize uint32, defaultTimeout uint32, sa *syscall.SecurityAttributes) (handle syscall.Handle, err error) [failretval==syscall.InvalidHandle] = CreateNamedPipeW
|
||||||
//sys createFile(name string, access uint32, mode uint32, sa *securityAttributes, createmode uint32, attrs uint32, templatefile syscall.Handle) (handle syscall.Handle, err error) [failretval==syscall.InvalidHandle] = CreateFileW
|
//sys createFile(name string, access uint32, mode uint32, sa *syscall.SecurityAttributes, createmode uint32, attrs uint32, templatefile syscall.Handle) (handle syscall.Handle, err error) [failretval==syscall.InvalidHandle] = CreateFileW
|
||||||
//sys waitNamedPipe(name string, timeout uint32) (err error) = WaitNamedPipeW
|
//sys waitNamedPipe(name string, timeout uint32) (err error) = WaitNamedPipeW
|
||||||
//sys getNamedPipeInfo(pipe syscall.Handle, flags *uint32, outSize *uint32, inSize *uint32, maxInstances *uint32) (err error) = GetNamedPipeInfo
|
//sys getNamedPipeInfo(pipe syscall.Handle, flags *uint32, outSize *uint32, inSize *uint32, maxInstances *uint32) (err error) = GetNamedPipeInfo
|
||||||
//sys getNamedPipeHandleState(pipe syscall.Handle, state *uint32, curInstances *uint32, maxCollectionCount *uint32, collectDataTimeout *uint32, userName *uint16, maxUserNameSize uint32) (err error) = GetNamedPipeHandleStateW
|
//sys getNamedPipeHandleState(pipe syscall.Handle, state *uint32, curInstances *uint32, maxCollectionCount *uint32, collectDataTimeout *uint32, userName *uint16, maxUserNameSize uint32) (err error) = GetNamedPipeHandleStateW
|
||||||
//sys localAlloc(uFlags uint32, length uint32) (ptr uintptr) = LocalAlloc
|
//sys localAlloc(uFlags uint32, length uint32) (ptr uintptr) = LocalAlloc
|
||||||
//sys copyMemory(dst uintptr, src uintptr, length uint32) = RtlCopyMemory
|
|
||||||
|
|
||||||
type securityAttributes struct {
|
|
||||||
Length uint32
|
|
||||||
SecurityDescriptor uintptr
|
|
||||||
InheritHandle uint32
|
|
||||||
}
|
|
||||||
|
|
||||||
const (
|
const (
|
||||||
cERROR_PIPE_BUSY = syscall.Errno(231)
|
cERROR_PIPE_BUSY = syscall.Errno(231)
|
||||||
@ -233,13 +226,13 @@ func makeServerPipeHandle(path string, securityDescriptor []byte, c *PipeConfig,
|
|||||||
mode |= cPIPE_TYPE_MESSAGE
|
mode |= cPIPE_TYPE_MESSAGE
|
||||||
}
|
}
|
||||||
|
|
||||||
sa := &securityAttributes{}
|
sa := &syscall.SecurityAttributes{}
|
||||||
sa.Length = uint32(unsafe.Sizeof(*sa))
|
sa.Length = uint32(unsafe.Sizeof(*sa))
|
||||||
if securityDescriptor != nil {
|
if securityDescriptor != nil {
|
||||||
len := uint32(len(securityDescriptor))
|
len := uint32(len(securityDescriptor))
|
||||||
sa.SecurityDescriptor = localAlloc(0, len)
|
sa.SecurityDescriptor = localAlloc(0, len)
|
||||||
defer localFree(sa.SecurityDescriptor)
|
defer localFree(sa.SecurityDescriptor)
|
||||||
copyMemory(sa.SecurityDescriptor, uintptr(unsafe.Pointer(&securityDescriptor[0])), len)
|
copy((*[0xffff]byte)(unsafe.Pointer(sa.SecurityDescriptor))[:], securityDescriptor)
|
||||||
}
|
}
|
||||||
h, err := createNamedPipe(path, flags, mode, cPIPE_UNLIMITED_INSTANCES, uint32(c.OutputBufferSize), uint32(c.InputBufferSize), 0, sa)
|
h, err := createNamedPipe(path, flags, mode, cPIPE_UNLIMITED_INSTANCES, uint32(c.OutputBufferSize), uint32(c.InputBufferSize), 0, sa)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
|
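
The pipe.go change above drops the package-private `securityAttributes` mirror struct in favor of `syscall.SecurityAttributes`, and replaces the `RtlCopyMemory` syscall stub (removed from zsyscall_windows.go below) with Go's built-in `copy` over the LocalAlloc'd buffer, reached by reinterpreting the raw pointer as a large byte array. A hedged sketch of that pointer-reinterpretation idiom in isolation; here the destination is ordinary Go memory rather than a LocalAlloc allocation:

```go
package main

import (
	"fmt"
	"unsafe"
)

// copyToRaw fills the memory behind dst with src, the same way the vendored
// code copies a security descriptor into sa.SecurityDescriptor: view the raw
// pointer as a large byte array and let the built-in copy bound the write.
func copyToRaw(dst unsafe.Pointer, src []byte) {
	copy((*[0xffff]byte)(dst)[:len(src)], src)
}

func main() {
	buf := make([]byte, 8) // stands in for the LocalAlloc'd buffer
	copyToRaw(unsafe.Pointer(&buf[0]), []byte{1, 2, 3, 4})
	fmt.Println(buf) // [1 2 3 4 0 0 0 0]
}
```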

vendor/github.com/Microsoft/go-winio/zsyscall_windows.go (generated, vendored, 14 lines changed)

@@ -53,7 +53,6 @@ var (
 	procGetNamedPipeInfo = modkernel32.NewProc("GetNamedPipeInfo")
 	procGetNamedPipeHandleStateW = modkernel32.NewProc("GetNamedPipeHandleStateW")
 	procLocalAlloc = modkernel32.NewProc("LocalAlloc")
-	procRtlCopyMemory = modkernel32.NewProc("RtlCopyMemory")
 	procLookupAccountNameW = modadvapi32.NewProc("LookupAccountNameW")
 	procConvertSidToStringSidW = modadvapi32.NewProc("ConvertSidToStringSidW")
 	procConvertStringSecurityDescriptorToSecurityDescriptorW = modadvapi32.NewProc("ConvertStringSecurityDescriptorToSecurityDescriptorW")
@@ -141,7 +140,7 @@ func connectNamedPipe(pipe syscall.Handle, o *syscall.Overlapped) (err error) {
 	return
 }
 
-func createNamedPipe(name string, flags uint32, pipeMode uint32, maxInstances uint32, outSize uint32, inSize uint32, defaultTimeout uint32, sa *securityAttributes) (handle syscall.Handle, err error) {
+func createNamedPipe(name string, flags uint32, pipeMode uint32, maxInstances uint32, outSize uint32, inSize uint32, defaultTimeout uint32, sa *syscall.SecurityAttributes) (handle syscall.Handle, err error) {
 	var _p0 *uint16
 	_p0, err = syscall.UTF16PtrFromString(name)
 	if err != nil {
@@ -150,7 +149,7 @@ func createNamedPipe(name string, flags uint32, pipeMode uint32, maxInstances ui
 	return _createNamedPipe(_p0, flags, pipeMode, maxInstances, outSize, inSize, defaultTimeout, sa)
 }
 
-func _createNamedPipe(name *uint16, flags uint32, pipeMode uint32, maxInstances uint32, outSize uint32, inSize uint32, defaultTimeout uint32, sa *securityAttributes) (handle syscall.Handle, err error) {
+func _createNamedPipe(name *uint16, flags uint32, pipeMode uint32, maxInstances uint32, outSize uint32, inSize uint32, defaultTimeout uint32, sa *syscall.SecurityAttributes) (handle syscall.Handle, err error) {
 	r0, _, e1 := syscall.Syscall9(procCreateNamedPipeW.Addr(), 8, uintptr(unsafe.Pointer(name)), uintptr(flags), uintptr(pipeMode), uintptr(maxInstances), uintptr(outSize), uintptr(inSize), uintptr(defaultTimeout), uintptr(unsafe.Pointer(sa)), 0)
 	handle = syscall.Handle(r0)
 	if handle == syscall.InvalidHandle {
@@ -163,7 +162,7 @@ func _createNamedPipe(name *uint16, flags uint32, pipeMode uint32, maxInstances
 	return
 }
 
-func createFile(name string, access uint32, mode uint32, sa *securityAttributes, createmode uint32, attrs uint32, templatefile syscall.Handle) (handle syscall.Handle, err error) {
+func createFile(name string, access uint32, mode uint32, sa *syscall.SecurityAttributes, createmode uint32, attrs uint32, templatefile syscall.Handle) (handle syscall.Handle, err error) {
 	var _p0 *uint16
 	_p0, err = syscall.UTF16PtrFromString(name)
 	if err != nil {
@@ -172,7 +171,7 @@ func createFile(name string, access uint32, mode uint32, sa *securityAttributes,
 	return _createFile(_p0, access, mode, sa, createmode, attrs, templatefile)
 }
 
-func _createFile(name *uint16, access uint32, mode uint32, sa *securityAttributes, createmode uint32, attrs uint32, templatefile syscall.Handle) (handle syscall.Handle, err error) {
+func _createFile(name *uint16, access uint32, mode uint32, sa *syscall.SecurityAttributes, createmode uint32, attrs uint32, templatefile syscall.Handle) (handle syscall.Handle, err error) {
 	r0, _, e1 := syscall.Syscall9(procCreateFileW.Addr(), 7, uintptr(unsafe.Pointer(name)), uintptr(access), uintptr(mode), uintptr(unsafe.Pointer(sa)), uintptr(createmode), uintptr(attrs), uintptr(templatefile), 0, 0)
 	handle = syscall.Handle(r0)
 	if handle == syscall.InvalidHandle {
@@ -236,11 +235,6 @@ func localAlloc(uFlags uint32, length uint32) (ptr uintptr) {
 	return
 }
 
-func copyMemory(dst uintptr, src uintptr, length uint32) {
-	syscall.Syscall(procRtlCopyMemory.Addr(), 3, uintptr(dst), uintptr(src), uintptr(length))
-	return
-}
-
 func lookupAccountName(systemName *uint16, accountName string, sid *byte, sidSize *uint32, refDomain *uint16, refDomainSize *uint32, sidNameUse *uint32) (err error) {
 	var _p0 *uint16
 	_p0, err = syscall.UTF16PtrFromString(accountName)

vendor/github.com/Sirupsen/logrus/.travis.yml (generated, vendored, 15 lines changed)

@@ -1,10 +1,13 @@
 language: go
 go:
-  - 1.3
-  - 1.4
-  - 1.5
-  - 1.6
+  - 1.6.x
+  - 1.7.x
+  - 1.8.x
   - tip
+env:
+  - GOMAXPROCS=4 GORACE=halt_on_error=1
 install:
-  - go get -t ./...
-script: GOMAXPROCS=4 GORACE="halt_on_error=1" go test -race -v ./...
+  - go get github.com/stretchr/testify/assert
+script:
+  - go test -race -v .
+  - cd hooks/null && go test -race -v .

vendor/github.com/Sirupsen/logrus/CHANGELOG.md (generated, vendored, 34 lines changed)

@@ -1,3 +1,37 @@
+# 1.0.0
+
+* Officially changed name to lower-case
+* bug: colors on Windows 10 (#541)
+* bug: fix race in accessing level (#512)
+
+# 0.11.5
+
+* feature: add writer and writerlevel to entry (#372)
+
+# 0.11.4
+
+* bug: fix undefined variable on solaris (#493)
+
+# 0.11.3
+
+* formatter: configure quoting of empty values (#484)
+* formatter: configure quoting character (default is `"`) (#484)
+* bug: fix not importing io correctly in non-linux environments (#481)
+
+# 0.11.2
+
+* bug: fix windows terminal detection (#476)
+
+# 0.11.1
+
+* bug: fix tty detection with custom out (#471)
+
+# 0.11.0
+
+* performance: Use bufferpool to allocate (#370)
+* terminal: terminal detection for app-engine (#343)
+* feature: exit handler (#375)
+
 # 0.10.0
 
 * feature: Add a test hook (#180)
174
vendor/github.com/Sirupsen/logrus/README.md
generated
vendored
174
vendor/github.com/Sirupsen/logrus/README.md
generated
vendored
@ -1,4 +1,4 @@
|
|||||||
# Logrus <img src="http://i.imgur.com/hTeVwmJ.png" width="40" height="40" alt=":walrus:" class="emoji" title=":walrus:"/> [](https://travis-ci.org/Sirupsen/logrus) [](https://godoc.org/github.com/Sirupsen/logrus)
|
# Logrus <img src="http://i.imgur.com/hTeVwmJ.png" width="40" height="40" alt=":walrus:" class="emoji" title=":walrus:"/> [](https://travis-ci.org/sirupsen/logrus) [](https://godoc.org/github.com/sirupsen/logrus)
|
||||||
|
|
||||||
Logrus is a structured logger for Go (golang), completely API compatible with
|
Logrus is a structured logger for Go (golang), completely API compatible with
|
||||||
the standard library logger. [Godoc][godoc]. **Please note the Logrus API is not
|
the standard library logger. [Godoc][godoc]. **Please note the Logrus API is not
|
||||||
@ -7,6 +7,17 @@ many large deployments. The core API is unlikely to change much but please
|
|||||||
version control your Logrus to make sure you aren't fetching latest `master` on
|
version control your Logrus to make sure you aren't fetching latest `master` on
|
||||||
every build.**
|
every build.**
|
||||||
|
|
||||||
|
**Seeing weird case-sensitive problems?** Unfortunately, the author failed to
|
||||||
|
realize the consequences of renaming to lower-case. Due to the Go package
|
||||||
|
environment, this caused issues. Regretfully, there's no turning back now.
|
||||||
|
Everything using `logrus` will need to use the lower-case:
|
||||||
|
`github.com/sirupsen/logrus`. Any package that isn't, should be changed.
|
||||||
|
|
||||||
|
I am terribly sorry for this inconvenience. Logrus strives hard for backwards
|
||||||
|
compatibility, and the author failed to realize the cascading consequences of
|
||||||
|
such a name-change. To fix Glide, see [these
|
||||||
|
comments](https://github.com/sirupsen/logrus/issues/553#issuecomment-306591437).
|
||||||
|
|
||||||
Nicely color-coded in development (when a TTY is attached, otherwise just
|
Nicely color-coded in development (when a TTY is attached, otherwise just
|
||||||
plain text):
|
plain text):
|
||||||
|
|
||||||
@ -46,6 +57,12 @@ time="2015-03-26T01:27:38-04:00" level=fatal msg="The ice breaks!" err=&{0x20822
|
|||||||
exit status 1
|
exit status 1
|
||||||
```
|
```
|
||||||
|
|
||||||
|
#### Case-sensitivity
|
||||||
|
|
||||||
|
The organization's name was changed to lower-case--and this will not be changed
|
||||||
|
back. If you are getting import conflicts due to case sensitivity, please use
|
||||||
|
the lower-case import: `github.com/sirupsen/logrus`.
|
||||||
|
|
||||||
#### Example
|
#### Example
|
||||||
|
|
||||||
The simplest way to use Logrus is simply the package-level exported logger:
|
The simplest way to use Logrus is simply the package-level exported logger:
|
||||||
@ -54,7 +71,7 @@ The simplest way to use Logrus is simply the package-level exported logger:
|
|||||||
package main
|
package main
|
||||||
|
|
||||||
import (
|
import (
|
||||||
log "github.com/Sirupsen/logrus"
|
log "github.com/sirupsen/logrus"
|
||||||
)
|
)
|
||||||
|
|
||||||
func main() {
|
func main() {
|
||||||
@ -65,7 +82,7 @@ func main() {
|
|||||||
```
|
```
|
||||||
|
|
||||||
Note that it's completely api-compatible with the stdlib logger, so you can
|
Note that it's completely api-compatible with the stdlib logger, so you can
|
||||||
replace your `log` imports everywhere with `log "github.com/Sirupsen/logrus"`
|
replace your `log` imports everywhere with `log "github.com/sirupsen/logrus"`
|
||||||
and you'll now have the flexibility of Logrus. You can customize it all you
|
and you'll now have the flexibility of Logrus. You can customize it all you
|
||||||
want:
|
want:
|
||||||
|
|
||||||
@ -74,15 +91,16 @@ package main
|
|||||||
|
|
||||||
import (
|
import (
|
||||||
"os"
|
"os"
|
||||||
log "github.com/Sirupsen/logrus"
|
log "github.com/sirupsen/logrus"
|
||||||
)
|
)
|
||||||
|
|
||||||
func init() {
|
func init() {
|
||||||
// Log as JSON instead of the default ASCII formatter.
|
// Log as JSON instead of the default ASCII formatter.
|
||||||
log.SetFormatter(&log.JSONFormatter{})
|
log.SetFormatter(&log.JSONFormatter{})
|
||||||
|
|
||||||
// Output to stderr instead of stdout, could also be a file.
|
// Output to stdout instead of the default stderr
|
||||||
log.SetOutput(os.Stderr)
|
// Can be any io.Writer, see below for File example
|
||||||
|
log.SetOutput(os.Stdout)
|
||||||
|
|
||||||
// Only log the warning severity or above.
|
// Only log the warning severity or above.
|
||||||
log.SetLevel(log.WarnLevel)
|
log.SetLevel(log.WarnLevel)
|
||||||
@ -123,7 +141,8 @@ application, you can also create an instance of the `logrus` Logger:
|
|||||||
package main
|
package main
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"github.com/Sirupsen/logrus"
|
"os"
|
||||||
|
"github.com/sirupsen/logrus"
|
||||||
)
|
)
|
||||||
|
|
||||||
// Create a new instance of the logger. You can have any number of instances.
|
// Create a new instance of the logger. You can have any number of instances.
|
||||||
@ -132,7 +151,15 @@ var log = logrus.New()
|
|||||||
func main() {
|
func main() {
|
||||||
// The API for setting attributes is a little different than the package level
|
// The API for setting attributes is a little different than the package level
|
||||||
// exported logger. See Godoc.
|
// exported logger. See Godoc.
|
||||||
log.Out = os.Stderr
|
log.Out = os.Stdout
|
||||||
|
|
||||||
|
// You could set this to any `io.Writer` such as a file
|
||||||
|
// file, err := os.OpenFile("logrus.log", os.O_CREATE|os.O_WRONLY, 0666)
|
||||||
|
// if err == nil {
|
||||||
|
// log.Out = file
|
||||||
|
// } else {
|
||||||
|
// log.Info("Failed to log to file, using default stderr")
|
||||||
|
// }
|
||||||
|
|
||||||
log.WithFields(logrus.Fields{
|
log.WithFields(logrus.Fields{
|
||||||
"animal": "walrus",
|
"animal": "walrus",
|
||||||
@ -143,7 +170,7 @@ func main() {
|
|||||||
|
|
||||||
#### Fields
|
#### Fields
|
||||||
|
|
||||||
Logrus encourages careful, structured logging though logging fields instead of
|
Logrus encourages careful, structured logging through logging fields instead of
|
||||||
long, unparseable error messages. For example, instead of: `log.Fatalf("Failed
|
long, unparseable error messages. For example, instead of: `log.Fatalf("Failed
|
||||||
to send event %s to topic %s with key %d")`, you should log the much more
|
to send event %s to topic %s with key %d")`, you should log the much more
|
||||||
discoverable:
|
discoverable:
|
||||||
@ -165,6 +192,20 @@ In general, with Logrus using any of the `printf`-family functions should be
|
|||||||
seen as a hint you should add a field, however, you can still use the
|
seen as a hint you should add a field, however, you can still use the
|
||||||
`printf`-family functions with Logrus.
|
`printf`-family functions with Logrus.
|
||||||
|
|
||||||
|
#### Default Fields
|
||||||
|
|
||||||
|
Often it's helpful to have fields _always_ attached to log statements in an
|
||||||
|
application or parts of one. For example, you may want to always log the
|
||||||
|
`request_id` and `user_ip` in the context of a request. Instead of writing
|
||||||
|
`log.WithFields(log.Fields{"request_id": request_id, "user_ip": user_ip})` on
|
||||||
|
every line, you can create a `logrus.Entry` to pass around instead:
|
||||||
|
|
||||||
|
```go
|
||||||
|
requestLogger := log.WithFields(log.Fields{"request_id": request_id, "user_ip": user_ip})
|
||||||
|
requestLogger.Info("something happened on that request") # will log request_id and user_ip
|
||||||
|
requestLogger.Warn("something not great happened")
|
||||||
|
```
|
||||||
|
|
||||||
#### Hooks
|
#### Hooks
|
||||||
|
|
||||||
You can add hooks for logging levels. For example to send errors to an exception
|
You can add hooks for logging levels. For example to send errors to an exception
|
||||||
@ -176,9 +217,9 @@ Logrus comes with [built-in hooks](hooks/). Add those, or your custom hook, in
|
|||||||
|
|
||||||
```go
|
```go
|
||||||
import (
|
import (
|
||||||
log "github.com/Sirupsen/logrus"
|
log "github.com/sirupsen/logrus"
|
||||||
"gopkg.in/gemnasium/logrus-airbrake-hook.v2" // the package is named "aibrake"
|
"gopkg.in/gemnasium/logrus-airbrake-hook.v2" // the package is named "aibrake"
|
||||||
logrus_syslog "github.com/Sirupsen/logrus/hooks/syslog"
|
logrus_syslog "github.com/sirupsen/logrus/hooks/syslog"
|
||||||
"log/syslog"
|
"log/syslog"
|
||||||
)
|
)
|
||||||
|
|
||||||
@ -200,40 +241,51 @@ Note: Syslog hook also support connecting to local syslog (Ex. "/dev/log" or "/v
|
|||||||
|
|
||||||
| Hook | Description |
|
| Hook | Description |
|
||||||
| ----- | ----------- |
|
| ----- | ----------- |
|
||||||
| [Airbrake](https://github.com/gemnasium/logrus-airbrake-hook) | Send errors to the Airbrake API V3. Uses the official [`gobrake`](https://github.com/airbrake/gobrake) behind the scenes. |
|
|
||||||
| [Airbrake "legacy"](https://github.com/gemnasium/logrus-airbrake-legacy-hook) | Send errors to an exception tracking service compatible with the Airbrake API V2. Uses [`airbrake-go`](https://github.com/tobi/airbrake-go) behind the scenes. |
|
| [Airbrake "legacy"](https://github.com/gemnasium/logrus-airbrake-legacy-hook) | Send errors to an exception tracking service compatible with the Airbrake API V2. Uses [`airbrake-go`](https://github.com/tobi/airbrake-go) behind the scenes. |
|
||||||
| [Papertrail](https://github.com/polds/logrus-papertrail-hook) | Send errors to the [Papertrail](https://papertrailapp.com) hosted logging service via UDP. |
|
| [Airbrake](https://github.com/gemnasium/logrus-airbrake-hook) | Send errors to the Airbrake API V3. Uses the official [`gobrake`](https://github.com/airbrake/gobrake) behind the scenes. |
|
||||||
| [Syslog](https://github.com/Sirupsen/logrus/blob/master/hooks/syslog/syslog.go) | Send errors to remote syslog server. Uses standard library `log/syslog` behind the scenes. |
|
| [Amazon Kinesis](https://github.com/evalphobia/logrus_kinesis) | Hook for logging to [Amazon Kinesis](https://aws.amazon.com/kinesis/) |
|
||||||
| [Bugsnag](https://github.com/Shopify/logrus-bugsnag/blob/master/bugsnag.go) | Send errors to the Bugsnag exception tracking service. |
|
|
||||||
| [Sentry](https://github.com/evalphobia/logrus_sentry) | Send errors to the Sentry error logging and aggregation service. |
|
|
||||||
| [Hiprus](https://github.com/nubo/hiprus) | Send errors to a channel in hipchat. |
|
|
||||||
| [Logrusly](https://github.com/sebest/logrusly) | Send logs to [Loggly](https://www.loggly.com/) |
|
|
||||||
| [Slackrus](https://github.com/johntdyer/slackrus) | Hook for Slack chat. |
|
|
||||||
| [Journalhook](https://github.com/wercker/journalhook) | Hook for logging to `systemd-journald` |
|
|
||||||
| [Graylog](https://github.com/gemnasium/logrus-graylog-hook) | Hook for logging to [Graylog](http://graylog2.org/) |
|
|
||||||
| [Raygun](https://github.com/squirkle/logrus-raygun-hook) | Hook for logging to [Raygun.io](http://raygun.io/) |
|
|
||||||
| [LFShook](https://github.com/rifflock/lfshook) | Hook for logging to the local filesystem |
|
|
||||||
| [Honeybadger](https://github.com/agonzalezro/logrus_honeybadger) | Hook for sending exceptions to Honeybadger |
|
|
||||||
| [Mail](https://github.com/zbindenren/logrus_mail) | Hook for sending exceptions via mail |
|
|
||||||
| [Rollrus](https://github.com/heroku/rollrus) | Hook for sending errors to rollbar |
|
|
||||||
| [Fluentd](https://github.com/evalphobia/logrus_fluent) | Hook for logging to fluentd |
|
|
||||||
| [Mongodb](https://github.com/weekface/mgorus) | Hook for logging to mongodb |
|
|
||||||
| [Influxus] (http://github.com/vlad-doru/influxus) | Hook for concurrently logging to [InfluxDB] (http://influxdata.com/) |
|
|
||||||
| [InfluxDB](https://github.com/Abramovic/logrus_influxdb) | Hook for logging to influxdb |
|
|
||||||
| [Octokit](https://github.com/dorajistyle/logrus-octokit-hook) | Hook for logging to github via octokit |
|
|
||||||
| [DeferPanic](https://github.com/deferpanic/dp-logrus) | Hook for logging to DeferPanic |
|
|
||||||
| [Redis-Hook](https://github.com/rogierlommers/logrus-redis-hook) | Hook for logging to a ELK stack (through Redis) |
|
|
||||||
| [Amqp-Hook](https://github.com/vladoatanasov/logrus_amqp) | Hook for logging to Amqp broker (Like RabbitMQ) |
|
| [Amqp-Hook](https://github.com/vladoatanasov/logrus_amqp) | Hook for logging to Amqp broker (Like RabbitMQ) |
|
||||||
| [KafkaLogrus](https://github.com/goibibo/KafkaLogrus) | Hook for logging to kafka |
|
| [Bugsnag](https://github.com/Shopify/logrus-bugsnag/blob/master/bugsnag.go) | Send errors to the Bugsnag exception tracking service. |
|
||||||
| [Typetalk](https://github.com/dragon3/logrus-typetalk-hook) | Hook for logging to [Typetalk](https://www.typetalk.in/) |
|
| [DeferPanic](https://github.com/deferpanic/dp-logrus) | Hook for logging to DeferPanic |
|
||||||
|
| [Discordrus](https://github.com/kz/discordrus) | Hook for logging to [Discord](https://discordapp.com/) |
|
||||||
| [ElasticSearch](https://github.com/sohlich/elogrus) | Hook for logging to ElasticSearch|
|
| [ElasticSearch](https://github.com/sohlich/elogrus) | Hook for logging to ElasticSearch|
|
||||||
| [Sumorus](https://github.com/doublefree/sumorus) | Hook for logging to [SumoLogic](https://www.sumologic.com/)|
|
| [Firehose](https://github.com/beaubrewer/logrus_firehose) | Hook for logging to [Amazon Firehose](https://aws.amazon.com/kinesis/firehose/)
|
||||||
| [Scribe](https://github.com/sagar8192/logrus-scribe-hook) | Hook for logging to [Scribe](https://github.com/facebookarchive/scribe)|
|
| [Fluentd](https://github.com/evalphobia/logrus_fluent) | Hook for logging to fluentd |
|
||||||
| [Logstash](https://github.com/bshuster-repo/logrus-logstash-hook) | Hook for logging to [Logstash](https://www.elastic.co/products/logstash) |
|
| [Go-Slack](https://github.com/multiplay/go-slack) | Hook for logging to [Slack](https://slack.com) |
|
||||||
| [logz.io](https://github.com/ripcurld00d/logrus-logzio-hook) | Hook for logging to [logz.io](https://logz.io), a Log as a Service using Logstash |
|
| [Graylog](https://github.com/gemnasium/logrus-graylog-hook) | Hook for logging to [Graylog](http://graylog2.org/) |
|
||||||
|
| [Hiprus](https://github.com/nubo/hiprus) | Send errors to a channel in hipchat. |
|
||||||
|
| [Honeybadger](https://github.com/agonzalezro/logrus_honeybadger) | Hook for sending exceptions to Honeybadger |
|
||||||
|
| [InfluxDB](https://github.com/Abramovic/logrus_influxdb) | Hook for logging to influxdb |
|
||||||
|
| [Influxus](http://github.com/vlad-doru/influxus) | Hook for concurrently logging to [InfluxDB](http://influxdata.com/) |
|
||||||
|
| [Journalhook](https://github.com/wercker/journalhook) | Hook for logging to `systemd-journald` |
|
||||||
|
| [KafkaLogrus](https://github.com/goibibo/KafkaLogrus) | Hook for logging to kafka |
|
||||||
|
| [LFShook](https://github.com/rifflock/lfshook) | Hook for logging to the local filesystem |
|
||||||
|
| [Logentries](https://github.com/jcftang/logentriesrus) | Hook for logging to [Logentries](https://logentries.com/) |
|
||||||
|
| [Logentrus](https://github.com/puddingfactory/logentrus) | Hook for logging to [Logentries](https://logentries.com/) |
|
||||||
| [Logmatic.io](https://github.com/logmatic/logmatic-go) | Hook for logging to [Logmatic.io](http://logmatic.io/) |
|
| [Logmatic.io](https://github.com/logmatic/logmatic-go) | Hook for logging to [Logmatic.io](http://logmatic.io/) |
|
||||||
|
| [Logrusly](https://github.com/sebest/logrusly) | Send logs to [Loggly](https://www.loggly.com/) |
|
||||||
|
| [Logstash](https://github.com/bshuster-repo/logrus-logstash-hook) | Hook for logging to [Logstash](https://www.elastic.co/products/logstash) |
|
||||||
|
| [Mail](https://github.com/zbindenren/logrus_mail) | Hook for sending exceptions via mail |
|
||||||
|
| [Mongodb](https://github.com/weekface/mgorus) | Hook for logging to mongodb |
|
||||||
|
| [NATS-Hook](https://github.com/rybit/nats_logrus_hook) | Hook for logging to [NATS](https://nats.io) |
|
||||||
|
| [Octokit](https://github.com/dorajistyle/logrus-octokit-hook) | Hook for logging to github via octokit |
|
||||||
|
| [Papertrail](https://github.com/polds/logrus-papertrail-hook) | Send errors to the [Papertrail](https://papertrailapp.com) hosted logging service via UDP. |
|
||||||
|
| [PostgreSQL](https://github.com/gemnasium/logrus-postgresql-hook) | Send logs to [PostgreSQL](http://postgresql.org) |
|
||||||
| [Pushover](https://github.com/toorop/logrus_pushover) | Send error via [Pushover](https://pushover.net) |
|
| [Pushover](https://github.com/toorop/logrus_pushover) | Send error via [Pushover](https://pushover.net) |
|
||||||
|
| [Raygun](https://github.com/squirkle/logrus-raygun-hook) | Hook for logging to [Raygun.io](http://raygun.io/) |
|
||||||
|
| [Redis-Hook](https://github.com/rogierlommers/logrus-redis-hook) | Hook for logging to a ELK stack (through Redis) |
|
||||||
|
| [Rollrus](https://github.com/heroku/rollrus) | Hook for sending errors to rollbar |
|
||||||
|
| [Scribe](https://github.com/sagar8192/logrus-scribe-hook) | Hook for logging to [Scribe](https://github.com/facebookarchive/scribe)|
|
||||||
|
| [Sentry](https://github.com/evalphobia/logrus_sentry) | Send errors to the Sentry error logging and aggregation service. |
|
||||||
|
| [Slackrus](https://github.com/johntdyer/slackrus) | Hook for Slack chat. |
|
||||||
|
| [Stackdriver](https://github.com/knq/sdhook) | Hook for logging to [Google Stackdriver](https://cloud.google.com/logging/) |
|
||||||
|
| [Sumorus](https://github.com/doublefree/sumorus) | Hook for logging to [SumoLogic](https://www.sumologic.com/)|
|
||||||
|
| [Syslog](https://github.com/Sirupsen/logrus/blob/master/hooks/syslog/syslog.go) | Send errors to remote syslog server. Uses standard library `log/syslog` behind the scenes. |
|
||||||
|
| [Syslog TLS](https://github.com/shinji62/logrus-syslog-ng) | Send errors to remote syslog server with TLS support. |
|
||||||
|
| [TraceView](https://github.com/evalphobia/logrus_appneta) | Hook for logging to [AppNeta TraceView](https://www.appneta.com/products/traceview/) |
|
||||||
|
| [Typetalk](https://github.com/dragon3/logrus-typetalk-hook) | Hook for logging to [Typetalk](https://www.typetalk.in/) |
|
||||||
|
| [logz.io](https://github.com/ripcurld00d/logrus-logzio-hook) | Hook for logging to [logz.io](https://logz.io), a Log as a Service using Logstash |
|
||||||
|
| [SQS-Hook](https://github.com/tsarpaul/logrus_sqs) | Hook for logging to [Amazon Simple Queue Service (SQS)](https://aws.amazon.com/sqs/) |
|
||||||
|
|
||||||
#### Level logging
|
#### Level logging
|
||||||
|
|
||||||
@ -282,7 +334,7 @@ could do:
|
|||||||
|
|
||||||
```go
|
```go
|
||||||
import (
|
import (
|
||||||
log "github.com/Sirupsen/logrus"
|
log "github.com/sirupsen/logrus"
|
||||||
)
|
)
|
||||||
|
|
||||||
init() {
|
init() {
|
||||||
@ -309,8 +361,11 @@ The built-in logging formatters are:

without colors.
* *Note:* to force colored output when there is no TTY, set the `ForceColors`
field to `true`. To force no colored output even if there is a TTY set the
`DisableColors` field to `true`
`DisableColors` field to `true`. For Windows, see
[github.com/mattn/go-colorable](https://github.com/mattn/go-colorable).
* All options are listed in the [generated docs](https://godoc.org/github.com/sirupsen/logrus#TextFormatter).
* `logrus.JSONFormatter`. Logs fields as JSON.
* All options are listed in the [generated docs](https://godoc.org/github.com/sirupsen/logrus#JSONFormatter).

Third party logging formatters:
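A minimal sketch of the ForceColors / DisableColors note above, assuming the renamed sirupsen import path used elsewhere in this diff:

```go
package main

import log "github.com/sirupsen/logrus"

func main() {
	// ForceColors keeps ANSI colors even when output is not a TTY (e.g. piped);
	// setting DisableColors to true would strip them even on a TTY.
	log.SetFormatter(&log.TextFormatter{
		ForceColors:   true,
		DisableColors: false,
	})
	log.WithField("animal", "walrus").Info("colored even without a TTY")
}
```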
@ -359,6 +414,18 @@ srv := http.Server{

Each line written to that writer will be printed the usual way, using formatters
and hooks. The level for those entries is `info`.

This means that we can override the standard library logger easily:

```go
logger := logrus.New()
logger.Formatter = &logrus.JSONFormatter{}

// Use logrus for standard log output
// Note that `log` here references stdlib's log
// Not logrus imported under the name `log`.
log.SetOutput(logger.Writer())
```

#### Rotation

Log rotation is not provided with Logrus. Log rotation should be done by an
@ -370,7 +437,7 @@ entries. It should not be a feature of the application-level logger.

| Tool | Description |
| ---- | ----------- |
|[Logrus Mate](https://github.com/gogap/logrus_mate)|Logrus mate is a tool for Logrus to manage loggers, you can initial logger's level, hook and formatter by config file, the logger will generated with different config at different environment.|
|[Logrus Viper Helper](https://github.com/heirko/go-contrib/tree/master/logrusHelper)|An Helper arround Logrus to wrap with spf13/Viper to load configuration with fangs! And to simplify Logrus configuration use some behavior of [Logrus Mate](https://github.com/gogap/logrus_mate). [sample](https://github.com/heirko/iris-contrib/blob/master/middleware/logrus-logger/example) |
|[Logrus Viper Helper](https://github.com/heirko/go-contrib/tree/master/logrusHelper)|An Helper around Logrus to wrap with spf13/Viper to load configuration with fangs! And to simplify Logrus configuration use some behavior of [Logrus Mate](https://github.com/gogap/logrus_mate). [sample](https://github.com/heirko/iris-contrib/blob/master/middleware/logrus-logger/example) |

#### Testing
@ -380,15 +447,24 @@ Logrus has a built in facility for asserting the presence of log messages. This

* a test logger (`test.NewNullLogger`) that just records log messages (and does not output any):

```go
logger, hook := NewNullLogger()
import(
"github.com/sirupsen/logrus"
"github.com/sirupsen/logrus/hooks/null"
"github.com/stretchr/testify/assert"
"testing"
)

func TestSomething(t*testing.T){
logger, hook := null.NewNullLogger()
logger.Error("Helloerror")

assert.Equal(1, len(hook.Entries))
assert.Equal(t, 1, len(hook.Entries))
assert.Equal(logrus.ErrorLevel, hook.LastEntry().Level)
assert.Equal(t, logrus.ErrorLevel, hook.LastEntry().Level)
assert.Equal("Hello error", hook.LastEntry().Message)
assert.Equal(t, "Helloerror", hook.LastEntry().Message)

hook.Reset()
assert.Nil(hook.LastEntry())
assert.Nil(t, hook.LastEntry())
}
```

#### Fatal handlers
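For readability, here is the new side of the testing example above, untangled from the interleaved old lines; the package clause is added for completeness, while the imports and calls are exactly as the updated README states them:

```go
package example_test

import (
	"testing"

	"github.com/sirupsen/logrus"
	"github.com/sirupsen/logrus/hooks/null"
	"github.com/stretchr/testify/assert"
)

func TestSomething(t *testing.T) {
	// The null logger records entries on the hook without writing anywhere.
	logger, hook := null.NewNullLogger()
	logger.Error("Helloerror")

	assert.Equal(t, 1, len(hook.Entries))
	assert.Equal(t, logrus.ErrorLevel, hook.LastEntry().Level)
	assert.Equal(t, "Helloerror", hook.LastEntry().Message)

	hook.Reset()
	assert.Nil(t, hook.LastEntry())
}
```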
@ -407,7 +483,7 @@ logrus.RegisterExitHandler(handler)

...
```

#### Thread safty
#### Thread safety

By default Logger is protected by mutex for concurrent writes, this mutex is invoked when calling hooks and writing logs.
If you are sure such locking is not needed, you can call logger.SetNoLock() to disable the locking.
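The SetNoLock knob mentioned above is easy to misuse; a minimal sketch of the only case where dropping the lock is safe, namely when hooks and the output writer are already race-free (assuming the vendored v1.0.0 API):

```go
package main

import (
	"io/ioutil"

	log "github.com/sirupsen/logrus"
)

func main() {
	logger := log.New()
	// Output and hooks must themselves be safe for concurrent use
	// before the internal mutex is disabled.
	logger.Out = ioutil.Discard
	logger.SetNoLock()
	logger.Info("logging without the internal lock")
}
```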
2 vendor/github.com/Sirupsen/logrus/alt_exit.go generated vendored
@ -1,7 +1,7 @@

package logrus

// The following code was sourced and modified from the
// https://bitbucket.org/tebeka/atexit package governed by the following license:
// https://github.com/tebeka/atexit package governed by the following license:
//
// Copyright (c) 2012 Miki Tebeka <miki.tebeka@gmail.com>.
//
4 vendor/github.com/Sirupsen/logrus/doc.go generated vendored
@ -7,7 +7,7 @@ The simplest way to use Logrus is simply the package-level exported logger:

package main

import (
  log "github.com/Sirupsen/logrus"
  log "github.com/sirupsen/logrus"
)

func main() {
@ -21,6 +21,6 @@ The simplest way to use Logrus is simply the package-level exported logger:

Output:
time="2015-09-07T08:48:33Z" level=info msg="A walrus appears" animal=walrus number=1 size=10

For a full guide visit https://github.com/Sirupsen/logrus
For a full guide visit https://github.com/sirupsen/logrus
*/
package logrus
36 vendor/github.com/Sirupsen/logrus/entry.go generated vendored
@ -126,7 +126,7 @@ func (entry Entry) log(level Level, msg string) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
func (entry *Entry) Debug(args ...interface{}) {
|
func (entry *Entry) Debug(args ...interface{}) {
|
||||||
if entry.Logger.Level >= DebugLevel {
|
if entry.Logger.level() >= DebugLevel {
|
||||||
entry.log(DebugLevel, fmt.Sprint(args...))
|
entry.log(DebugLevel, fmt.Sprint(args...))
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@ -136,13 +136,13 @@ func (entry *Entry) Print(args ...interface{}) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
func (entry *Entry) Info(args ...interface{}) {
|
func (entry *Entry) Info(args ...interface{}) {
|
||||||
if entry.Logger.Level >= InfoLevel {
|
if entry.Logger.level() >= InfoLevel {
|
||||||
entry.log(InfoLevel, fmt.Sprint(args...))
|
entry.log(InfoLevel, fmt.Sprint(args...))
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func (entry *Entry) Warn(args ...interface{}) {
|
func (entry *Entry) Warn(args ...interface{}) {
|
||||||
if entry.Logger.Level >= WarnLevel {
|
if entry.Logger.level() >= WarnLevel {
|
||||||
entry.log(WarnLevel, fmt.Sprint(args...))
|
entry.log(WarnLevel, fmt.Sprint(args...))
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@ -152,20 +152,20 @@ func (entry *Entry) Warning(args ...interface{}) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
func (entry *Entry) Error(args ...interface{}) {
|
func (entry *Entry) Error(args ...interface{}) {
|
||||||
if entry.Logger.Level >= ErrorLevel {
|
if entry.Logger.level() >= ErrorLevel {
|
||||||
entry.log(ErrorLevel, fmt.Sprint(args...))
|
entry.log(ErrorLevel, fmt.Sprint(args...))
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func (entry *Entry) Fatal(args ...interface{}) {
|
func (entry *Entry) Fatal(args ...interface{}) {
|
||||||
if entry.Logger.Level >= FatalLevel {
|
if entry.Logger.level() >= FatalLevel {
|
||||||
entry.log(FatalLevel, fmt.Sprint(args...))
|
entry.log(FatalLevel, fmt.Sprint(args...))
|
||||||
}
|
}
|
||||||
Exit(1)
|
Exit(1)
|
||||||
}
|
}
|
||||||
|
|
||||||
func (entry *Entry) Panic(args ...interface{}) {
|
func (entry *Entry) Panic(args ...interface{}) {
|
||||||
if entry.Logger.Level >= PanicLevel {
|
if entry.Logger.level() >= PanicLevel {
|
||||||
entry.log(PanicLevel, fmt.Sprint(args...))
|
entry.log(PanicLevel, fmt.Sprint(args...))
|
||||||
}
|
}
|
||||||
panic(fmt.Sprint(args...))
|
panic(fmt.Sprint(args...))
|
||||||
@ -174,13 +174,13 @@ func (entry *Entry) Panic(args ...interface{}) {
|
|||||||
// Entry Printf family functions
|
// Entry Printf family functions
|
||||||
|
|
||||||
func (entry *Entry) Debugf(format string, args ...interface{}) {
|
func (entry *Entry) Debugf(format string, args ...interface{}) {
|
||||||
if entry.Logger.Level >= DebugLevel {
|
if entry.Logger.level() >= DebugLevel {
|
||||||
entry.Debug(fmt.Sprintf(format, args...))
|
entry.Debug(fmt.Sprintf(format, args...))
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func (entry *Entry) Infof(format string, args ...interface{}) {
|
func (entry *Entry) Infof(format string, args ...interface{}) {
|
||||||
if entry.Logger.Level >= InfoLevel {
|
if entry.Logger.level() >= InfoLevel {
|
||||||
entry.Info(fmt.Sprintf(format, args...))
|
entry.Info(fmt.Sprintf(format, args...))
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@ -190,7 +190,7 @@ func (entry *Entry) Printf(format string, args ...interface{}) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
func (entry *Entry) Warnf(format string, args ...interface{}) {
|
func (entry *Entry) Warnf(format string, args ...interface{}) {
|
||||||
if entry.Logger.Level >= WarnLevel {
|
if entry.Logger.level() >= WarnLevel {
|
||||||
entry.Warn(fmt.Sprintf(format, args...))
|
entry.Warn(fmt.Sprintf(format, args...))
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@ -200,20 +200,20 @@ func (entry *Entry) Warningf(format string, args ...interface{}) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
func (entry *Entry) Errorf(format string, args ...interface{}) {
|
func (entry *Entry) Errorf(format string, args ...interface{}) {
|
||||||
if entry.Logger.Level >= ErrorLevel {
|
if entry.Logger.level() >= ErrorLevel {
|
||||||
entry.Error(fmt.Sprintf(format, args...))
|
entry.Error(fmt.Sprintf(format, args...))
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func (entry *Entry) Fatalf(format string, args ...interface{}) {
|
func (entry *Entry) Fatalf(format string, args ...interface{}) {
|
||||||
if entry.Logger.Level >= FatalLevel {
|
if entry.Logger.level() >= FatalLevel {
|
||||||
entry.Fatal(fmt.Sprintf(format, args...))
|
entry.Fatal(fmt.Sprintf(format, args...))
|
||||||
}
|
}
|
||||||
Exit(1)
|
Exit(1)
|
||||||
}
|
}
|
||||||
|
|
||||||
func (entry *Entry) Panicf(format string, args ...interface{}) {
|
func (entry *Entry) Panicf(format string, args ...interface{}) {
|
||||||
if entry.Logger.Level >= PanicLevel {
|
if entry.Logger.level() >= PanicLevel {
|
||||||
entry.Panic(fmt.Sprintf(format, args...))
|
entry.Panic(fmt.Sprintf(format, args...))
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@ -221,13 +221,13 @@ func (entry *Entry) Panicf(format string, args ...interface{}) {
|
|||||||
// Entry Println family functions
|
// Entry Println family functions
|
||||||
|
|
||||||
func (entry *Entry) Debugln(args ...interface{}) {
|
func (entry *Entry) Debugln(args ...interface{}) {
|
||||||
if entry.Logger.Level >= DebugLevel {
|
if entry.Logger.level() >= DebugLevel {
|
||||||
entry.Debug(entry.sprintlnn(args...))
|
entry.Debug(entry.sprintlnn(args...))
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func (entry *Entry) Infoln(args ...interface{}) {
|
func (entry *Entry) Infoln(args ...interface{}) {
|
||||||
if entry.Logger.Level >= InfoLevel {
|
if entry.Logger.level() >= InfoLevel {
|
||||||
entry.Info(entry.sprintlnn(args...))
|
entry.Info(entry.sprintlnn(args...))
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@ -237,7 +237,7 @@ func (entry *Entry) Println(args ...interface{}) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
func (entry *Entry) Warnln(args ...interface{}) {
|
func (entry *Entry) Warnln(args ...interface{}) {
|
||||||
if entry.Logger.Level >= WarnLevel {
|
if entry.Logger.level() >= WarnLevel {
|
||||||
entry.Warn(entry.sprintlnn(args...))
|
entry.Warn(entry.sprintlnn(args...))
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@ -247,20 +247,20 @@ func (entry *Entry) Warningln(args ...interface{}) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
func (entry *Entry) Errorln(args ...interface{}) {
|
func (entry *Entry) Errorln(args ...interface{}) {
|
||||||
if entry.Logger.Level >= ErrorLevel {
|
if entry.Logger.level() >= ErrorLevel {
|
||||||
entry.Error(entry.sprintlnn(args...))
|
entry.Error(entry.sprintlnn(args...))
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func (entry *Entry) Fatalln(args ...interface{}) {
|
func (entry *Entry) Fatalln(args ...interface{}) {
|
||||||
if entry.Logger.Level >= FatalLevel {
|
if entry.Logger.level() >= FatalLevel {
|
||||||
entry.Fatal(entry.sprintlnn(args...))
|
entry.Fatal(entry.sprintlnn(args...))
|
||||||
}
|
}
|
||||||
Exit(1)
|
Exit(1)
|
||||||
}
|
}
|
||||||
|
|
||||||
func (entry *Entry) Panicln(args ...interface{}) {
|
func (entry *Entry) Panicln(args ...interface{}) {
|
||||||
if entry.Logger.Level >= PanicLevel {
|
if entry.Logger.level() >= PanicLevel {
|
||||||
entry.Panic(entry.sprintlnn(args...))
|
entry.Panic(entry.sprintlnn(args...))
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
4 vendor/github.com/Sirupsen/logrus/exported.go generated vendored
@ -31,14 +31,14 @@ func SetFormatter(formatter Formatter) {

func SetLevel(level Level) {
	std.mu.Lock()
	defer std.mu.Unlock()
	std.Level = level
	std.setLevel(level)
}

// GetLevel returns the standard logger level.
func GetLevel() Level {
	std.mu.Lock()
	defer std.mu.Unlock()
	return std.Level
	return std.level()
}

// AddHook adds a hook to the standard logger hooks.
41 vendor/github.com/Sirupsen/logrus/json_formatter.go generated vendored
@ -5,9 +5,40 @@ import (
|
|||||||
"fmt"
|
"fmt"
|
||||||
)
|
)
|
||||||
|
|
||||||
|
type fieldKey string
|
||||||
|
type FieldMap map[fieldKey]string
|
||||||
|
|
||||||
|
const (
|
||||||
|
FieldKeyMsg = "msg"
|
||||||
|
FieldKeyLevel = "level"
|
||||||
|
FieldKeyTime = "time"
|
||||||
|
)
|
||||||
|
|
||||||
|
func (f FieldMap) resolve(key fieldKey) string {
|
||||||
|
if k, ok := f[key]; ok {
|
||||||
|
return k
|
||||||
|
}
|
||||||
|
|
||||||
|
return string(key)
|
||||||
|
}
|
||||||
|
|
||||||
type JSONFormatter struct {
|
type JSONFormatter struct {
|
||||||
// TimestampFormat sets the format used for marshaling timestamps.
|
// TimestampFormat sets the format used for marshaling timestamps.
|
||||||
TimestampFormat string
|
TimestampFormat string
|
||||||
|
|
||||||
|
// DisableTimestamp allows disabling automatic timestamps in output
|
||||||
|
DisableTimestamp bool
|
||||||
|
|
||||||
|
// FieldMap allows users to customize the names of keys for various fields.
|
||||||
|
// As an example:
|
||||||
|
// formatter := &JSONFormatter{
|
||||||
|
// FieldMap: FieldMap{
|
||||||
|
// FieldKeyTime: "@timestamp",
|
||||||
|
// FieldKeyLevel: "@level",
|
||||||
|
// FieldKeyMsg: "@message",
|
||||||
|
// },
|
||||||
|
// }
|
||||||
|
FieldMap FieldMap
|
||||||
}
|
}
|
||||||
|
|
||||||
func (f *JSONFormatter) Format(entry *Entry) ([]byte, error) {
|
func (f *JSONFormatter) Format(entry *Entry) ([]byte, error) {
|
||||||
@ -16,7 +47,7 @@ func (f *JSONFormatter) Format(entry *Entry) ([]byte, error) {
|
|||||||
switch v := v.(type) {
|
switch v := v.(type) {
|
||||||
case error:
|
case error:
|
||||||
// Otherwise errors are ignored by `encoding/json`
|
// Otherwise errors are ignored by `encoding/json`
|
||||||
// https://github.com/Sirupsen/logrus/issues/137
|
// https://github.com/sirupsen/logrus/issues/137
|
||||||
data[k] = v.Error()
|
data[k] = v.Error()
|
||||||
default:
|
default:
|
||||||
data[k] = v
|
data[k] = v
|
||||||
@ -29,9 +60,11 @@ func (f *JSONFormatter) Format(entry *Entry) ([]byte, error) {
|
|||||||
timestampFormat = DefaultTimestampFormat
|
timestampFormat = DefaultTimestampFormat
|
||||||
}
|
}
|
||||||
|
|
||||||
data["time"] = entry.Time.Format(timestampFormat)
|
if !f.DisableTimestamp {
|
||||||
data["msg"] = entry.Message
|
data[f.FieldMap.resolve(FieldKeyTime)] = entry.Time.Format(timestampFormat)
|
||||||
data["level"] = entry.Level.String()
|
}
|
||||||
|
data[f.FieldMap.resolve(FieldKeyMsg)] = entry.Message
|
||||||
|
data[f.FieldMap.resolve(FieldKeyLevel)] = entry.Level.String()
|
||||||
|
|
||||||
serialized, err := json.Marshal(data)
|
serialized, err := json.Marshal(data)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
|
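The json_formatter.go hunk above adds DisableTimestamp and a FieldMap for renaming the default output keys; a minimal usage sketch (the "@timestamp"-style names are illustrative choices, not defaults):

```go
package main

import log "github.com/sirupsen/logrus"

func main() {
	log.SetFormatter(&log.JSONFormatter{
		DisableTimestamp: false,
		// Remap the built-in keys used for time, level, and message.
		FieldMap: log.FieldMap{
			log.FieldKeyTime:  "@timestamp",
			log.FieldKeyLevel: "@level",
			log.FieldKeyMsg:   "@message",
		},
	})
	log.WithField("animal", "walrus").Info("A walrus appears")
}
```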
51 vendor/github.com/Sirupsen/logrus/logger.go generated vendored
@ -4,6 +4,7 @@ import (
|
|||||||
"io"
|
"io"
|
||||||
"os"
|
"os"
|
||||||
"sync"
|
"sync"
|
||||||
|
"sync/atomic"
|
||||||
)
|
)
|
||||||
|
|
||||||
type Logger struct {
|
type Logger struct {
|
||||||
@ -112,7 +113,7 @@ func (logger *Logger) WithError(err error) *Entry {
|
|||||||
}
|
}
|
||||||
|
|
||||||
func (logger *Logger) Debugf(format string, args ...interface{}) {
|
func (logger *Logger) Debugf(format string, args ...interface{}) {
|
||||||
if logger.Level >= DebugLevel {
|
if logger.level() >= DebugLevel {
|
||||||
entry := logger.newEntry()
|
entry := logger.newEntry()
|
||||||
entry.Debugf(format, args...)
|
entry.Debugf(format, args...)
|
||||||
logger.releaseEntry(entry)
|
logger.releaseEntry(entry)
|
||||||
@ -120,7 +121,7 @@ func (logger *Logger) Debugf(format string, args ...interface{}) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
func (logger *Logger) Infof(format string, args ...interface{}) {
|
func (logger *Logger) Infof(format string, args ...interface{}) {
|
||||||
if logger.Level >= InfoLevel {
|
if logger.level() >= InfoLevel {
|
||||||
entry := logger.newEntry()
|
entry := logger.newEntry()
|
||||||
entry.Infof(format, args...)
|
entry.Infof(format, args...)
|
||||||
logger.releaseEntry(entry)
|
logger.releaseEntry(entry)
|
||||||
@ -134,7 +135,7 @@ func (logger *Logger) Printf(format string, args ...interface{}) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
func (logger *Logger) Warnf(format string, args ...interface{}) {
|
func (logger *Logger) Warnf(format string, args ...interface{}) {
|
||||||
if logger.Level >= WarnLevel {
|
if logger.level() >= WarnLevel {
|
||||||
entry := logger.newEntry()
|
entry := logger.newEntry()
|
||||||
entry.Warnf(format, args...)
|
entry.Warnf(format, args...)
|
||||||
logger.releaseEntry(entry)
|
logger.releaseEntry(entry)
|
||||||
@ -142,7 +143,7 @@ func (logger *Logger) Warnf(format string, args ...interface{}) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
func (logger *Logger) Warningf(format string, args ...interface{}) {
|
func (logger *Logger) Warningf(format string, args ...interface{}) {
|
||||||
if logger.Level >= WarnLevel {
|
if logger.level() >= WarnLevel {
|
||||||
entry := logger.newEntry()
|
entry := logger.newEntry()
|
||||||
entry.Warnf(format, args...)
|
entry.Warnf(format, args...)
|
||||||
logger.releaseEntry(entry)
|
logger.releaseEntry(entry)
|
||||||
@ -150,7 +151,7 @@ func (logger *Logger) Warningf(format string, args ...interface{}) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
func (logger *Logger) Errorf(format string, args ...interface{}) {
|
func (logger *Logger) Errorf(format string, args ...interface{}) {
|
||||||
if logger.Level >= ErrorLevel {
|
if logger.level() >= ErrorLevel {
|
||||||
entry := logger.newEntry()
|
entry := logger.newEntry()
|
||||||
entry.Errorf(format, args...)
|
entry.Errorf(format, args...)
|
||||||
logger.releaseEntry(entry)
|
logger.releaseEntry(entry)
|
||||||
@ -158,7 +159,7 @@ func (logger *Logger) Errorf(format string, args ...interface{}) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
func (logger *Logger) Fatalf(format string, args ...interface{}) {
|
func (logger *Logger) Fatalf(format string, args ...interface{}) {
|
||||||
if logger.Level >= FatalLevel {
|
if logger.level() >= FatalLevel {
|
||||||
entry := logger.newEntry()
|
entry := logger.newEntry()
|
||||||
entry.Fatalf(format, args...)
|
entry.Fatalf(format, args...)
|
||||||
logger.releaseEntry(entry)
|
logger.releaseEntry(entry)
|
||||||
@ -167,7 +168,7 @@ func (logger *Logger) Fatalf(format string, args ...interface{}) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
func (logger *Logger) Panicf(format string, args ...interface{}) {
|
func (logger *Logger) Panicf(format string, args ...interface{}) {
|
||||||
if logger.Level >= PanicLevel {
|
if logger.level() >= PanicLevel {
|
||||||
entry := logger.newEntry()
|
entry := logger.newEntry()
|
||||||
entry.Panicf(format, args...)
|
entry.Panicf(format, args...)
|
||||||
logger.releaseEntry(entry)
|
logger.releaseEntry(entry)
|
||||||
@ -175,7 +176,7 @@ func (logger *Logger) Panicf(format string, args ...interface{}) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
func (logger *Logger) Debug(args ...interface{}) {
|
func (logger *Logger) Debug(args ...interface{}) {
|
||||||
if logger.Level >= DebugLevel {
|
if logger.level() >= DebugLevel {
|
||||||
entry := logger.newEntry()
|
entry := logger.newEntry()
|
||||||
entry.Debug(args...)
|
entry.Debug(args...)
|
||||||
logger.releaseEntry(entry)
|
logger.releaseEntry(entry)
|
||||||
@ -183,7 +184,7 @@ func (logger *Logger) Debug(args ...interface{}) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
func (logger *Logger) Info(args ...interface{}) {
|
func (logger *Logger) Info(args ...interface{}) {
|
||||||
if logger.Level >= InfoLevel {
|
if logger.level() >= InfoLevel {
|
||||||
entry := logger.newEntry()
|
entry := logger.newEntry()
|
||||||
entry.Info(args...)
|
entry.Info(args...)
|
||||||
logger.releaseEntry(entry)
|
logger.releaseEntry(entry)
|
||||||
@ -197,7 +198,7 @@ func (logger *Logger) Print(args ...interface{}) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
func (logger *Logger) Warn(args ...interface{}) {
|
func (logger *Logger) Warn(args ...interface{}) {
|
||||||
if logger.Level >= WarnLevel {
|
if logger.level() >= WarnLevel {
|
||||||
entry := logger.newEntry()
|
entry := logger.newEntry()
|
||||||
entry.Warn(args...)
|
entry.Warn(args...)
|
||||||
logger.releaseEntry(entry)
|
logger.releaseEntry(entry)
|
||||||
@ -205,7 +206,7 @@ func (logger *Logger) Warn(args ...interface{}) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
func (logger *Logger) Warning(args ...interface{}) {
|
func (logger *Logger) Warning(args ...interface{}) {
|
||||||
if logger.Level >= WarnLevel {
|
if logger.level() >= WarnLevel {
|
||||||
entry := logger.newEntry()
|
entry := logger.newEntry()
|
||||||
entry.Warn(args...)
|
entry.Warn(args...)
|
||||||
logger.releaseEntry(entry)
|
logger.releaseEntry(entry)
|
||||||
@ -213,7 +214,7 @@ func (logger *Logger) Warning(args ...interface{}) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
func (logger *Logger) Error(args ...interface{}) {
|
func (logger *Logger) Error(args ...interface{}) {
|
||||||
if logger.Level >= ErrorLevel {
|
if logger.level() >= ErrorLevel {
|
||||||
entry := logger.newEntry()
|
entry := logger.newEntry()
|
||||||
entry.Error(args...)
|
entry.Error(args...)
|
||||||
logger.releaseEntry(entry)
|
logger.releaseEntry(entry)
|
||||||
@ -221,7 +222,7 @@ func (logger *Logger) Error(args ...interface{}) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
func (logger *Logger) Fatal(args ...interface{}) {
|
func (logger *Logger) Fatal(args ...interface{}) {
|
||||||
if logger.Level >= FatalLevel {
|
if logger.level() >= FatalLevel {
|
||||||
entry := logger.newEntry()
|
entry := logger.newEntry()
|
||||||
entry.Fatal(args...)
|
entry.Fatal(args...)
|
||||||
logger.releaseEntry(entry)
|
logger.releaseEntry(entry)
|
||||||
@ -230,7 +231,7 @@ func (logger *Logger) Fatal(args ...interface{}) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
func (logger *Logger) Panic(args ...interface{}) {
|
func (logger *Logger) Panic(args ...interface{}) {
|
||||||
if logger.Level >= PanicLevel {
|
if logger.level() >= PanicLevel {
|
||||||
entry := logger.newEntry()
|
entry := logger.newEntry()
|
||||||
entry.Panic(args...)
|
entry.Panic(args...)
|
||||||
logger.releaseEntry(entry)
|
logger.releaseEntry(entry)
|
||||||
@ -238,7 +239,7 @@ func (logger *Logger) Panic(args ...interface{}) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
func (logger *Logger) Debugln(args ...interface{}) {
|
func (logger *Logger) Debugln(args ...interface{}) {
|
||||||
if logger.Level >= DebugLevel {
|
if logger.level() >= DebugLevel {
|
||||||
entry := logger.newEntry()
|
entry := logger.newEntry()
|
||||||
entry.Debugln(args...)
|
entry.Debugln(args...)
|
||||||
logger.releaseEntry(entry)
|
logger.releaseEntry(entry)
|
||||||
@ -246,7 +247,7 @@ func (logger *Logger) Debugln(args ...interface{}) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
func (logger *Logger) Infoln(args ...interface{}) {
|
func (logger *Logger) Infoln(args ...interface{}) {
|
||||||
if logger.Level >= InfoLevel {
|
if logger.level() >= InfoLevel {
|
||||||
entry := logger.newEntry()
|
entry := logger.newEntry()
|
||||||
entry.Infoln(args...)
|
entry.Infoln(args...)
|
||||||
logger.releaseEntry(entry)
|
logger.releaseEntry(entry)
|
||||||
@ -260,7 +261,7 @@ func (logger *Logger) Println(args ...interface{}) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
func (logger *Logger) Warnln(args ...interface{}) {
|
func (logger *Logger) Warnln(args ...interface{}) {
|
||||||
if logger.Level >= WarnLevel {
|
if logger.level() >= WarnLevel {
|
||||||
entry := logger.newEntry()
|
entry := logger.newEntry()
|
||||||
entry.Warnln(args...)
|
entry.Warnln(args...)
|
||||||
logger.releaseEntry(entry)
|
logger.releaseEntry(entry)
|
||||||
@ -268,7 +269,7 @@ func (logger *Logger) Warnln(args ...interface{}) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
func (logger *Logger) Warningln(args ...interface{}) {
|
func (logger *Logger) Warningln(args ...interface{}) {
|
||||||
if logger.Level >= WarnLevel {
|
if logger.level() >= WarnLevel {
|
||||||
entry := logger.newEntry()
|
entry := logger.newEntry()
|
||||||
entry.Warnln(args...)
|
entry.Warnln(args...)
|
||||||
logger.releaseEntry(entry)
|
logger.releaseEntry(entry)
|
||||||
@ -276,7 +277,7 @@ func (logger *Logger) Warningln(args ...interface{}) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
func (logger *Logger) Errorln(args ...interface{}) {
|
func (logger *Logger) Errorln(args ...interface{}) {
|
||||||
if logger.Level >= ErrorLevel {
|
if logger.level() >= ErrorLevel {
|
||||||
entry := logger.newEntry()
|
entry := logger.newEntry()
|
||||||
entry.Errorln(args...)
|
entry.Errorln(args...)
|
||||||
logger.releaseEntry(entry)
|
logger.releaseEntry(entry)
|
||||||
@ -284,7 +285,7 @@ func (logger *Logger) Errorln(args ...interface{}) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
func (logger *Logger) Fatalln(args ...interface{}) {
|
func (logger *Logger) Fatalln(args ...interface{}) {
|
||||||
if logger.Level >= FatalLevel {
|
if logger.level() >= FatalLevel {
|
||||||
entry := logger.newEntry()
|
entry := logger.newEntry()
|
||||||
entry.Fatalln(args...)
|
entry.Fatalln(args...)
|
||||||
logger.releaseEntry(entry)
|
logger.releaseEntry(entry)
|
||||||
@ -293,7 +294,7 @@ func (logger *Logger) Fatalln(args ...interface{}) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
func (logger *Logger) Panicln(args ...interface{}) {
|
func (logger *Logger) Panicln(args ...interface{}) {
|
||||||
if logger.Level >= PanicLevel {
|
if logger.level() >= PanicLevel {
|
||||||
entry := logger.newEntry()
|
entry := logger.newEntry()
|
||||||
entry.Panicln(args...)
|
entry.Panicln(args...)
|
||||||
logger.releaseEntry(entry)
|
logger.releaseEntry(entry)
|
||||||
@ -306,3 +307,11 @@ func (logger *Logger) Panicln(args ...interface{}) {

func (logger *Logger) SetNoLock() {
	logger.mu.Disable()
}

func (logger *Logger) level() Level {
	return Level(atomic.LoadUint32((*uint32)(&logger.Level)))
}

func (logger *Logger) setLevel(level Level) {
	atomic.StoreUint32((*uint32)(&logger.Level), uint32(level))
}
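The level()/setLevel helpers above exist because the level is read on every log call and may be changed concurrently through SetLevel; a hedged sketch of the access pattern the atomic load/store protects:

```go
package main

import (
	"sync"
	"time"

	log "github.com/sirupsen/logrus"
)

func main() {
	var wg sync.WaitGroup
	wg.Add(2)

	// One goroutine logs continuously; each call reads the level.
	go func() {
		defer wg.Done()
		for i := 0; i < 100; i++ {
			log.Debug("printed only while the level is at least debug")
			time.Sleep(time.Millisecond)
		}
	}()

	// Another goroutine flips the level; SetLevel now stores it atomically.
	go func() {
		defer wg.Done()
		for i := 0; i < 10; i++ {
			log.SetLevel(log.DebugLevel)
			time.Sleep(5 * time.Millisecond)
			log.SetLevel(log.InfoLevel)
		}
	}()

	wg.Wait()
}
```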
2 vendor/github.com/Sirupsen/logrus/logrus.go generated vendored
@ -10,7 +10,7 @@ import (

type Fields map[string]interface{}

// Level type
type Level uint8
type Level uint32

// Convert the Level to a string. E.g. PanicLevel becomes "panic".
func (level Level) String() string {
4 vendor/github.com/Sirupsen/logrus/terminal_appengine.go generated vendored
@ -2,7 +2,9 @@

package logrus

import "io"

// IsTerminal returns true if stderr's file descriptor is a terminal.
func IsTerminal() bool {
func IsTerminal(f io.Writer) bool {
	return true
}
12 vendor/github.com/Sirupsen/logrus/terminal_notwindows.go generated vendored
@ -9,14 +9,20 @@

package logrus

import (
	"io"
	"os"
	"syscall"
	"unsafe"
)

// IsTerminal returns true if stderr's file descriptor is a terminal.
func IsTerminal() bool {
func IsTerminal(f io.Writer) bool {
	fd := syscall.Stderr
	var termios Termios
	_, _, err := syscall.Syscall6(syscall.SYS_IOCTL, uintptr(fd), ioctlReadTermios, uintptr(unsafe.Pointer(&termios)), 0, 0, 0)
	switch v := f.(type) {
	case *os.File:
		_, _, err := syscall.Syscall6(syscall.SYS_IOCTL, uintptr(v.Fd()), ioctlReadTermios, uintptr(unsafe.Pointer(&termios)), 0, 0, 0)
		return err == 0
	default:
		return false
	}
}
10 vendor/github.com/Sirupsen/logrus/terminal_solaris.go generated vendored
@ -3,13 +3,19 @@

package logrus

import (
	"io"
	"os"

	"golang.org/x/sys/unix"
)

// IsTerminal returns true if the given file descriptor is a terminal.
func IsTerminal() bool {
func IsTerminal(f io.Writer) bool {
	_, err := unix.IoctlGetTermios(int(os.Stdout.Fd()), unix.TCGETA)
	switch v := f.(type) {
	case *os.File:
		_, err := unix.IoctlGetTermios(int(v.Fd()), unix.TCGETA)
		return err == nil
	default:
		return false
	}
}
67 vendor/github.com/Sirupsen/logrus/terminal_windows.go generated vendored
@ -8,6 +8,13 @@
|
|||||||
package logrus
|
package logrus
|
||||||
|
|
||||||
import (
|
import (
|
||||||
|
"bytes"
|
||||||
|
"errors"
|
||||||
|
"io"
|
||||||
|
"os"
|
||||||
|
"os/exec"
|
||||||
|
"strconv"
|
||||||
|
"strings"
|
||||||
"syscall"
|
"syscall"
|
||||||
"unsafe"
|
"unsafe"
|
||||||
)
|
)
|
||||||
@ -16,12 +23,60 @@ var kernel32 = syscall.NewLazyDLL("kernel32.dll")
|
|||||||
|
|
||||||
var (
|
var (
|
||||||
procGetConsoleMode = kernel32.NewProc("GetConsoleMode")
|
procGetConsoleMode = kernel32.NewProc("GetConsoleMode")
|
||||||
|
procSetConsoleMode = kernel32.NewProc("SetConsoleMode")
|
||||||
)
|
)
|
||||||
|
|
||||||
// IsTerminal returns true if stderr's file descriptor is a terminal.
|
const (
|
||||||
func IsTerminal() bool {
|
enableProcessedOutput = 0x0001
|
||||||
fd := syscall.Stderr
|
enableWrapAtEolOutput = 0x0002
|
||||||
var st uint32
|
enableVirtualTerminalProcessing = 0x0004
|
||||||
r, _, e := syscall.Syscall(procGetConsoleMode.Addr(), 2, uintptr(fd), uintptr(unsafe.Pointer(&st)), 0)
|
)
|
||||||
return r != 0 && e == 0
|
|
||||||
|
func getVersion() (float64, error) {
|
||||||
|
stdout, stderr := &bytes.Buffer{}, &bytes.Buffer{}
|
||||||
|
cmd := exec.Command("cmd", "ver")
|
||||||
|
cmd.Stdout = stdout
|
||||||
|
cmd.Stderr = stderr
|
||||||
|
err := cmd.Run()
|
||||||
|
if err != nil {
|
||||||
|
return -1, err
|
||||||
|
}
|
||||||
|
|
||||||
|
// The output should be like "Microsoft Windows [Version XX.X.XXXXXX]"
|
||||||
|
version := strings.Replace(stdout.String(), "\n", "", -1)
|
||||||
|
version = strings.Replace(version, "\r\n", "", -1)
|
||||||
|
|
||||||
|
x1 := strings.Index(version, "[Version")
|
||||||
|
|
||||||
|
if x1 == -1 || strings.Index(version, "]") == -1 {
|
||||||
|
return -1, errors.New("Can't determine Windows version")
|
||||||
|
}
|
||||||
|
|
||||||
|
return strconv.ParseFloat(version[x1+9:x1+13], 64)
|
||||||
|
}
|
||||||
|
|
||||||
|
func init() {
|
||||||
|
ver, err := getVersion()
|
||||||
|
if err != nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
// Activate Virtual Processing for Windows CMD
|
||||||
|
// Info: https://msdn.microsoft.com/en-us/library/windows/desktop/ms686033(v=vs.85).aspx
|
||||||
|
if ver >= 10 {
|
||||||
|
handle := syscall.Handle(os.Stderr.Fd())
|
||||||
|
procSetConsoleMode.Call(uintptr(handle), enableProcessedOutput|enableWrapAtEolOutput|enableVirtualTerminalProcessing)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// IsTerminal returns true if stderr's file descriptor is a terminal.
|
||||||
|
func IsTerminal(f io.Writer) bool {
|
||||||
|
switch v := f.(type) {
|
||||||
|
case *os.File:
|
||||||
|
var st uint32
|
||||||
|
r, _, e := syscall.Syscall(procGetConsoleMode.Addr(), 2, uintptr(v.Fd()), uintptr(unsafe.Pointer(&st)), 0)
|
||||||
|
return r != 0 && e == 0
|
||||||
|
default:
|
||||||
|
return false
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
55 vendor/github.com/Sirupsen/logrus/text_formatter.go generated vendored
@ -3,9 +3,9 @@ package logrus
|
|||||||
import (
|
import (
|
||||||
"bytes"
|
"bytes"
|
||||||
"fmt"
|
"fmt"
|
||||||
"runtime"
|
|
||||||
"sort"
|
"sort"
|
||||||
"strings"
|
"strings"
|
||||||
|
"sync"
|
||||||
"time"
|
"time"
|
||||||
)
|
)
|
||||||
|
|
||||||
@ -20,16 +20,10 @@ const (
|
|||||||
|
|
||||||
var (
|
var (
|
||||||
baseTimestamp time.Time
|
baseTimestamp time.Time
|
||||||
isTerminal bool
|
|
||||||
)
|
)
|
||||||
|
|
||||||
func init() {
|
func init() {
|
||||||
baseTimestamp = time.Now()
|
baseTimestamp = time.Now()
|
||||||
isTerminal = IsTerminal()
|
|
||||||
}
|
|
||||||
|
|
||||||
func miniTS() int {
|
|
||||||
return int(time.Since(baseTimestamp) / time.Second)
|
|
||||||
}
|
}
|
||||||
|
|
||||||
type TextFormatter struct {
|
type TextFormatter struct {
|
||||||
@ -54,11 +48,32 @@ type TextFormatter struct {
|
|||||||
// that log extremely frequently and don't use the JSON formatter this may not
|
// that log extremely frequently and don't use the JSON formatter this may not
|
||||||
// be desired.
|
// be desired.
|
||||||
DisableSorting bool
|
DisableSorting bool
|
||||||
|
|
||||||
|
// QuoteEmptyFields will wrap empty fields in quotes if true
|
||||||
|
QuoteEmptyFields bool
|
||||||
|
|
||||||
|
// QuoteCharacter can be set to the override the default quoting character "
|
||||||
|
// with something else. For example: ', or `.
|
||||||
|
QuoteCharacter string
|
||||||
|
|
||||||
|
// Whether the logger's out is to a terminal
|
||||||
|
isTerminal bool
|
||||||
|
|
||||||
|
sync.Once
|
||||||
|
}
|
||||||
|
|
||||||
|
func (f *TextFormatter) init(entry *Entry) {
|
||||||
|
if len(f.QuoteCharacter) == 0 {
|
||||||
|
f.QuoteCharacter = "\""
|
||||||
|
}
|
||||||
|
if entry.Logger != nil {
|
||||||
|
f.isTerminal = IsTerminal(entry.Logger.Out)
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func (f *TextFormatter) Format(entry *Entry) ([]byte, error) {
|
func (f *TextFormatter) Format(entry *Entry) ([]byte, error) {
|
||||||
var b *bytes.Buffer
|
var b *bytes.Buffer
|
||||||
var keys []string = make([]string, 0, len(entry.Data))
|
keys := make([]string, 0, len(entry.Data))
|
||||||
for k := range entry.Data {
|
for k := range entry.Data {
|
||||||
keys = append(keys, k)
|
keys = append(keys, k)
|
||||||
}
|
}
|
||||||
@ -74,8 +89,9 @@ func (f *TextFormatter) Format(entry *Entry) ([]byte, error) {
|
|||||||
|
|
||||||
prefixFieldClashes(entry.Data)
|
prefixFieldClashes(entry.Data)
|
||||||
|
|
||||||
isColorTerminal := isTerminal && (runtime.GOOS != "windows")
|
f.Do(func() { f.init(entry) })
|
||||||
isColored := (f.ForceColors || isColorTerminal) && !f.DisableColors
|
|
||||||
|
isColored := (f.ForceColors || f.isTerminal) && !f.DisableColors
|
||||||
|
|
||||||
timestampFormat := f.TimestampFormat
|
timestampFormat := f.TimestampFormat
|
||||||
if timestampFormat == "" {
|
if timestampFormat == "" {
|
||||||
@ -115,8 +131,10 @@ func (f *TextFormatter) printColored(b *bytes.Buffer, entry *Entry, keys []strin
|
|||||||
|
|
||||||
levelText := strings.ToUpper(entry.Level.String())[0:4]
|
levelText := strings.ToUpper(entry.Level.String())[0:4]
|
||||||
|
|
||||||
if !f.FullTimestamp {
|
if f.DisableTimestamp {
|
||||||
fmt.Fprintf(b, "\x1b[%dm%s\x1b[0m[%04d] %-44s ", levelColor, levelText, miniTS(), entry.Message)
|
fmt.Fprintf(b, "\x1b[%dm%s\x1b[0m %-44s ", levelColor, levelText, entry.Message)
|
||||||
|
} else if !f.FullTimestamp {
|
||||||
|
fmt.Fprintf(b, "\x1b[%dm%s\x1b[0m[%04d] %-44s ", levelColor, levelText, int(entry.Time.Sub(baseTimestamp)/time.Second), entry.Message)
|
||||||
} else {
|
} else {
|
||||||
fmt.Fprintf(b, "\x1b[%dm%s\x1b[0m[%s] %-44s ", levelColor, levelText, entry.Time.Format(timestampFormat), entry.Message)
|
fmt.Fprintf(b, "\x1b[%dm%s\x1b[0m[%s] %-44s ", levelColor, levelText, entry.Time.Format(timestampFormat), entry.Message)
|
||||||
}
|
}
|
||||||
@ -127,7 +145,10 @@ func (f *TextFormatter) printColored(b *bytes.Buffer, entry *Entry, keys []strin
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func needsQuoting(text string) bool {
|
func (f *TextFormatter) needsQuoting(text string) bool {
|
||||||
|
if f.QuoteEmptyFields && len(text) == 0 {
|
||||||
|
return true
|
||||||
|
}
|
||||||
for _, ch := range text {
|
for _, ch := range text {
|
||||||
if !((ch >= 'a' && ch <= 'z') ||
|
if !((ch >= 'a' && ch <= 'z') ||
|
||||||
(ch >= 'A' && ch <= 'Z') ||
|
(ch >= 'A' && ch <= 'Z') ||
|
||||||
@ -150,17 +171,17 @@ func (f *TextFormatter) appendKeyValue(b *bytes.Buffer, key string, value interf
|
|||||||
func (f *TextFormatter) appendValue(b *bytes.Buffer, value interface{}) {
|
func (f *TextFormatter) appendValue(b *bytes.Buffer, value interface{}) {
|
||||||
switch value := value.(type) {
|
switch value := value.(type) {
|
||||||
case string:
|
case string:
|
||||||
if !needsQuoting(value) {
|
if !f.needsQuoting(value) {
|
||||||
b.WriteString(value)
|
b.WriteString(value)
|
||||||
} else {
|
} else {
|
||||||
fmt.Fprintf(b, "%q", value)
|
fmt.Fprintf(b, "%s%v%s", f.QuoteCharacter, value, f.QuoteCharacter)
|
||||||
}
|
}
|
||||||
case error:
|
case error:
|
||||||
errmsg := value.Error()
|
errmsg := value.Error()
|
||||||
if !needsQuoting(errmsg) {
|
if !f.needsQuoting(errmsg) {
|
||||||
b.WriteString(errmsg)
|
b.WriteString(errmsg)
|
||||||
} else {
|
} else {
|
||||||
fmt.Fprintf(b, "%q", errmsg)
|
fmt.Fprintf(b, "%s%v%s", f.QuoteCharacter, errmsg, f.QuoteCharacter)
|
||||||
}
|
}
|
||||||
default:
|
default:
|
||||||
fmt.Fprint(b, value)
|
fmt.Fprint(b, value)
|
||||||
|
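The text_formatter.go and terminal_*.go hunks above make the terminal check use the logger's own output writer and add quoting options; a small sketch of a caller exercising the new fields (the chosen values are illustrative):

```go
package main

import (
	"os"

	log "github.com/sirupsen/logrus"
)

func main() {
	logger := log.New()
	// The formatter now asks whether this writer is a terminal,
	// instead of always probing stderr.
	logger.Out = os.Stdout
	logger.Formatter = &log.TextFormatter{
		QuoteEmptyFields: true, // wrap empty values in quotes
		QuoteCharacter:   "'",  // override the default " quote
	}
	logger.WithField("empty", "").Info("quoting demo")
}
```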
29 vendor/github.com/Sirupsen/logrus/writer.go generated vendored
@ -11,39 +11,48 @@ func (logger *Logger) Writer() *io.PipeWriter {
|
|||||||
}
|
}
|
||||||
|
|
||||||
func (logger *Logger) WriterLevel(level Level) *io.PipeWriter {
|
func (logger *Logger) WriterLevel(level Level) *io.PipeWriter {
|
||||||
|
return NewEntry(logger).WriterLevel(level)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (entry *Entry) Writer() *io.PipeWriter {
|
||||||
|
return entry.WriterLevel(InfoLevel)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (entry *Entry) WriterLevel(level Level) *io.PipeWriter {
|
||||||
reader, writer := io.Pipe()
|
reader, writer := io.Pipe()
|
||||||
|
|
||||||
var printFunc func(args ...interface{})
|
var printFunc func(args ...interface{})
|
||||||
|
|
||||||
switch level {
|
switch level {
|
||||||
case DebugLevel:
|
case DebugLevel:
|
||||||
printFunc = logger.Debug
|
printFunc = entry.Debug
|
||||||
case InfoLevel:
|
case InfoLevel:
|
||||||
printFunc = logger.Info
|
printFunc = entry.Info
|
||||||
case WarnLevel:
|
case WarnLevel:
|
||||||
printFunc = logger.Warn
|
printFunc = entry.Warn
|
||||||
case ErrorLevel:
|
case ErrorLevel:
|
||||||
printFunc = logger.Error
|
printFunc = entry.Error
|
||||||
case FatalLevel:
|
case FatalLevel:
|
||||||
printFunc = logger.Fatal
|
printFunc = entry.Fatal
|
||||||
case PanicLevel:
|
case PanicLevel:
|
||||||
printFunc = logger.Panic
|
printFunc = entry.Panic
|
||||||
default:
|
default:
|
||||||
printFunc = logger.Print
|
printFunc = entry.Print
|
||||||
}
|
}
|
||||||
|
|
||||||
go logger.writerScanner(reader, printFunc)
|
go entry.writerScanner(reader, printFunc)
|
||||||
runtime.SetFinalizer(writer, writerFinalizer)
|
runtime.SetFinalizer(writer, writerFinalizer)
|
||||||
|
|
||||||
return writer
|
return writer
|
||||||
}
|
}
|
||||||
|
|
||||||
func (logger *Logger) writerScanner(reader *io.PipeReader, printFunc func(args ...interface{})) {
|
func (entry *Entry) writerScanner(reader *io.PipeReader, printFunc func(args ...interface{})) {
|
||||||
scanner := bufio.NewScanner(reader)
|
scanner := bufio.NewScanner(reader)
|
||||||
for scanner.Scan() {
|
for scanner.Scan() {
|
||||||
printFunc(scanner.Text())
|
printFunc(scanner.Text())
|
||||||
}
|
}
|
||||||
if err := scanner.Err(); err != nil {
|
if err := scanner.Err(); err != nil {
|
||||||
logger.Errorf("Error while reading from Writer: %s", err)
|
entry.Errorf("Error while reading from Writer: %s", err)
|
||||||
}
|
}
|
||||||
reader.Close()
|
reader.Close()
|
||||||
}
|
}
|
||||||
|
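writer.go above moves the pipe plumbing onto Entry and adds WriterLevel, so piped lines can carry fields and a chosen level; a hedged sketch of routing the standard library logger through it:

```go
package main

import (
	stdlog "log"

	"github.com/sirupsen/logrus"
)

func main() {
	logger := logrus.New()

	// Entry.WriterLevel (added above) lets piped lines carry fields and a level.
	w := logger.WithField("component", "legacy").WriterLevel(logrus.WarnLevel)
	defer w.Close()

	// Every line written by the stdlib logger becomes a warn-level logrus entry.
	legacy := stdlog.New(w, "", 0)
	legacy.Println("written via the stdlib logger, logged by logrus")
}
```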
37 vendor/github.com/containerd/containerd/.appveyor.yml generated vendored Normal file
@ -0,0 +1,37 @@

version: "{build}"

image: Visual Studio 2017

clone_folder: c:\gopath\src\github.com\containerd\containerd

branches:
  only:
    - master

environment:
  GOPATH: C:\gopath
  CGO_ENABLED: 1

before_build:
  - choco install -y mingw
  # TODO: re-enable once the content unit-test have been updated to pass on windows
  #- choco install codecov

build_script:
  - bash.exe -lc "export PATH=/c/tools/mingw64/bin:/c/gopath/src/github.com/containerd/containerd/bin:$PATH ; mingw32-make.exe fmt"
  - bash.exe -lc "export PATH=/c/tools/mingw64/bin:/c/gopath/src/github.com/containerd/containerd/bin:$PATH ; mingw32-make.exe vet"
  - bash.exe -lc "export PATH=/c/tools/mingw64/bin:$PATH ; mingw32-make.exe build"
  - bash.exe -lc "export PATH=/c/tools/mingw64/bin:$PATH ; mingw32-make.exe binaries"

test_script:
  # TODO: need an equivalent of TRAVIS_COMMIT_RANGE
  # - GIT_CHECK_EXCLUDE="./vendor" TRAVIS_COMMIT_RANGE="${TRAVIS_COMMIT_RANGE/.../..}" C:\MinGW\bin\mingw32-make.exe dco
  - bash.exe -lc "export PATH=/c/tools/mingw64/bin:/c/gopath/src/github.com/containerd/containerd/bin:$PATH ; mingw32-make.exe integration"
  # TODO: re-enable once the content unit-test have been updated to pass on windows
  #- bash.exe -lc "export PATH=/c/tools/mingw64/bin:/c/gopath/src/github.com/containerd/containerd/bin:$PATH ; mingw32-make.exe coverage"
  #- bash.exe -lc "export PATH=/c/tools/mingw64/bin:/c/gopath/src/github.com/containerd/containerd/bin:$PATH ; mingw32-make.exe root-coverage"

on_success:
  # Note that, a Codecov upload token is not required.
  # TODO: re-enable once the content unit-test have been updated to pass on windows
  #- codecov -f coverage.txt
8 vendor/github.com/containerd/containerd/.travis.yml generated vendored
@ -32,13 +32,16 @@ env:
|
|||||||
- TRAVIS_GOOS=linux TRAVIS_CGO_ENABLED=1
|
- TRAVIS_GOOS=linux TRAVIS_CGO_ENABLED=1
|
||||||
- TRAVIS_GOOS=darwin TRAVIS_CGO_ENABLED=0
|
- TRAVIS_GOOS=darwin TRAVIS_CGO_ENABLED=0
|
||||||
|
|
||||||
|
before_install:
|
||||||
|
- uname -r
|
||||||
|
|
||||||
install:
|
install:
|
||||||
- if [ "$TRAVIS_GOOS" = "windows" ] ; then sudo apt-get install -y gcc-multilib gcc-mingw-w64; export CC=x86_64-w64-mingw32-gcc ; export CXX=x86_64-w64-mingw32-g++ ; fi
|
- if [ "$TRAVIS_GOOS" = "windows" ] ; then sudo apt-get install -y gcc-multilib gcc-mingw-w64; export CC=x86_64-w64-mingw32-gcc ; export CXX=x86_64-w64-mingw32-g++ ; fi
|
||||||
- wget https://github.com/google/protobuf/releases/download/v3.1.0/protoc-3.1.0-linux-x86_64.zip -O /tmp/protoc-3.1.0-linux-x86_64.zip
|
- wget https://github.com/google/protobuf/releases/download/v3.1.0/protoc-3.1.0-linux-x86_64.zip -O /tmp/protoc-3.1.0-linux-x86_64.zip
|
||||||
- unzip -o -d /tmp/protobuf /tmp/protoc-3.1.0-linux-x86_64.zip
|
- unzip -o -d /tmp/protobuf /tmp/protoc-3.1.0-linux-x86_64.zip
|
||||||
- export PATH=$PATH:/tmp/protobuf/bin/
|
- export PATH=$PATH:/tmp/protobuf/bin/
|
||||||
- go get -u github.com/vbatts/git-validation
|
- go get -u github.com/vbatts/git-validation
|
||||||
- sudo wget https://github.com/crosbymichael/runc/releases/download/ctd-1/runc -O /bin/runc; sudo chmod +x /bin/runc
|
- sudo wget https://github.com/crosbymichael/runc/releases/download/ctd-4/runc -O /bin/runc; sudo chmod +x /bin/runc
|
||||||
- wget https://github.com/xemul/criu/archive/v3.0.tar.gz -O /tmp/criu.tar.gz
|
- wget https://github.com/xemul/criu/archive/v3.0.tar.gz -O /tmp/criu.tar.gz
|
||||||
- tar -C /tmp/ -zxf /tmp/criu.tar.gz
|
- tar -C /tmp/ -zxf /tmp/criu.tar.gz
|
||||||
- cd /tmp/criu-3.0 && sudo make install-criu
|
- cd /tmp/criu-3.0 && sudo make install-criu
|
||||||
@ -49,6 +52,9 @@ script:
|
|||||||
- export CGO_ENABLED=$TRAVIS_CGO_ENABLED
|
- export CGO_ENABLED=$TRAVIS_CGO_ENABLED
|
||||||
- GIT_CHECK_EXCLUDE="./vendor" TRAVIS_COMMIT_RANGE="${TRAVIS_COMMIT_RANGE/.../..}" make dco
|
- GIT_CHECK_EXCLUDE="./vendor" TRAVIS_COMMIT_RANGE="${TRAVIS_COMMIT_RANGE/.../..}" make dco
|
||||||
- make fmt
|
- make fmt
|
||||||
|
# FIXME: For non-linux GOOS, without running `go build -i`, vet fails with `vet: import failed: can't find import: fmt`...
|
||||||
|
# Note that `go build -i` requires write permission to GOROOT. (So it is not called in Makefile)
|
||||||
|
- go build -i .
|
||||||
- make vet
|
- make vet
|
||||||
- make build
|
- make build
|
||||||
- make binaries
|
- make binaries
|
||||||
|
2 vendor/github.com/containerd/containerd/BUILDING.md generated vendored
@ -9,6 +9,8 @@ In first you need to setup your Go development environment. You can follow this

guideline [How to write go code](https://golang.org/doc/code.html) and at the
end you need to have `GOPATH` and `GOROOT` set in your environment.

Current containerd requires Go 1.8.x or above.

At this point you can use `go` to checkout `containerd` in your `GOPATH`:

```sh
57 vendor/github.com/containerd/containerd/Makefile generated vendored
@ -8,27 +8,33 @@ DESTDIR=/usr/local
|
|||||||
VERSION=$(shell git describe --match 'v[0-9]*' --dirty='.m' --always)
|
VERSION=$(shell git describe --match 'v[0-9]*' --dirty='.m' --always)
|
||||||
REVISION=$(shell git rev-parse HEAD)$(shell if ! git diff --no-ext-diff --quiet --exit-code; then echo .m; fi)
|
REVISION=$(shell git rev-parse HEAD)$(shell if ! git diff --no-ext-diff --quiet --exit-code; then echo .m; fi)
|
||||||
|
|
||||||
PKG=github.com/containerd/containerd
|
|
||||||
|
|
||||||
ifneq "$(strip $(shell command -v go 2>/dev/null))" ""
|
ifneq "$(strip $(shell command -v go 2>/dev/null))" ""
|
||||||
GOOS ?= $(shell go env GOOS)
|
GOOS ?= $(shell go env GOOS)
|
||||||
else
|
else
|
||||||
GOOS ?= $$GOOS
|
GOOS ?= $$GOOS
|
||||||
endif
|
endif
|
||||||
WHALE = "🐳"
|
|
||||||
|
WHALE = "🇩"
|
||||||
ONI = "👹"
|
ONI = "👹"
|
||||||
|
FIX_PATH = $1
|
||||||
ifeq ("$(OS)", "Windows_NT")
|
ifeq ("$(OS)", "Windows_NT")
|
||||||
WHALE="+"
|
WHALE="+"
|
||||||
ONI="-"
|
ONI="-"
|
||||||
|
FIX_PATH = $(subst /,\,$1)
|
||||||
endif
|
endif
|
||||||
|
GOARCH ?= $(shell go env GOARCH)
|
||||||
|
|
||||||
|
RELEASE=containerd-$(VERSION:v%=%).${GOOS}-${GOARCH}
|
||||||
|
|
||||||
|
PKG=github.com/containerd/containerd
|
||||||
|
|
||||||
# Project packages.
|
# Project packages.
|
||||||
PACKAGES=$(shell go list ./... | grep -v /vendor/)
|
PACKAGES=$(shell go list ./... | grep -v /vendor/)
|
||||||
INTEGRATION_PACKAGE=${PKG}/integration
|
INTEGRATION_PACKAGE=${PKG}
|
||||||
SNAPSHOT_PACKAGES=$(shell go list ./snapshot/...)
|
TEST_REQUIRES_ROOT_PACKAGES=$(shell for f in $$(git grep -l testutil.RequiresRoot | grep -v Makefile);do echo "${PKG}/$$(dirname $$f)"; done)
|
||||||
|
|
||||||
# Project binaries.
|
# Project binaries.
|
||||||
COMMANDS=ctr containerd protoc-gen-gogoctrd dist ctrd-protobuild
|
COMMANDS=ctr containerd
|
||||||
ifneq ("$(GOOS)", "windows")
|
ifneq ("$(GOOS)", "windows")
|
||||||
COMMANDS += containerd-shim
|
COMMANDS += containerd-shim
|
||||||
endif
|
endif
|
||||||
@ -41,9 +47,9 @@ GO_TAGS=$(if $(BUILDTAGS),-tags "$(BUILDTAGS)",)
|
|||||||
GO_LDFLAGS=-ldflags "-X $(PKG)/version.Version=$(VERSION) -X $(PKG)/version.Revision=$(REVISION) -X $(PKG)/version.Package=$(PKG) $(EXTRA_LDFLAGS)"
|
GO_LDFLAGS=-ldflags "-X $(PKG)/version.Version=$(VERSION) -X $(PKG)/version.Revision=$(REVISION) -X $(PKG)/version.Package=$(PKG) $(EXTRA_LDFLAGS)"
|
||||||
|
|
||||||
# Flags passed to `go test`
|
# Flags passed to `go test`
|
||||||
TESTFLAGS ?=-parallel 8 -race
|
TESTFLAGS ?=-parallel 8 -race -v
|
||||||
|
|
||||||
.PHONY: clean all AUTHORS fmt vet lint dco build binaries test integration setup generate protos checkprotos coverage ci check help install uninstall vendor
|
.PHONY: clean all AUTHORS fmt vet lint dco build binaries test integration setup generate protos checkprotos coverage ci check help install uninstall vendor release
|
||||||
.DEFAULT: default
|
.DEFAULT: default
|
||||||
|
|
||||||
all: binaries
|
all: binaries
|
||||||
@ -61,14 +67,15 @@ setup: ## install dependencies
|
|||||||
@go get -u github.com/golang/lint/golint
|
@go get -u github.com/golang/lint/golint
|
||||||
#@go get -u github.com/kisielk/errcheck
|
#@go get -u github.com/kisielk/errcheck
|
||||||
@go get -u github.com/gordonklaus/ineffassign
|
@go get -u github.com/gordonklaus/ineffassign
|
||||||
|
@go get -u github.com/stevvooe/protobuild
|
||||||
|
|
||||||
generate: protos
|
generate: protos
|
||||||
@echo "$(WHALE) $@"
|
@echo "$(WHALE) $@"
|
||||||
@PATH=${ROOTDIR}/bin:${PATH} go generate -x ${PACKAGES}
|
@PATH=${ROOTDIR}/bin:${PATH} go generate -x ${PACKAGES}
|
||||||
|
|
||||||
protos: bin/protoc-gen-gogoctrd bin/ctrd-protobuild ## generate protobuf
|
protos: bin/protoc-gen-gogoctrd ## generate protobuf
|
||||||
@echo "$(WHALE) $@"
|
@echo "$(WHALE) $@"
|
||||||
@PATH=${ROOTDIR}/bin:${PATH} ctrd-protobuild ${PACKAGES}
|
@PATH=${ROOTDIR}/bin:${PATH} protobuild ${PACKAGES}
|
||||||
|
|
||||||
checkprotos: protos ## check if protobufs needs to be generated again
|
checkprotos: protos ## check if protobufs needs to be generated again
|
||||||
@echo "$(WHALE) $@"
|
@echo "$(WHALE) $@"
|
||||||
@ -84,7 +91,7 @@ vet: binaries ## run go vet
|
|||||||
|
|
||||||
fmt: ## run go fmt
|
fmt: ## run go fmt
|
||||||
@echo "$(WHALE) $@"
|
@echo "$(WHALE) $@"
|
||||||
@test -z "$$(gofmt -s -l . | grep -v vendor/ | grep -v ".pb.go$$" | tee /dev/stderr)" || \
|
@test -z "$$(gofmt -s -l . | grep -Fv $(call FIX_PATH,'vendor/') | grep -v ".pb.go$$" | tee /dev/stderr)" || \
|
||||||
(echo "$(ONI) please format Go code with 'gofmt -s -w'" && false)
|
(echo "$(ONI) please format Go code with 'gofmt -s -w'" && false)
|
||||||
@test -z "$$(find . -path ./vendor -prune -o ! -name timestamp.proto ! -name duration.proto -name '*.proto' -type f -exec grep -Hn -e "^ " {} \; | tee /dev/stderr)" || \
|
@test -z "$$(find . -path ./vendor -prune -o ! -name timestamp.proto ! -name duration.proto -name '*.proto' -type f -exec grep -Hn -e "^ " {} \; | tee /dev/stderr)" || \
|
||||||
(echo "$(ONI) please indent proto files with tabs only" && false)
|
(echo "$(ONI) please indent proto files with tabs only" && false)
|
||||||
@ -93,7 +100,7 @@ fmt: ## run go fmt
|
|||||||
|
|
||||||
lint: ## run go lint
|
lint: ## run go lint
|
||||||
@echo "$(WHALE) $@"
|
@echo "$(WHALE) $@"
|
||||||
@test -z "$$(golint ./... | grep -v vendor/ | grep -v ".pb.go:" | tee /dev/stderr)"
|
@test -z "$$(golint ./... | grep -Fv $(call FIX_PATH,'vendor/') | grep -v ".pb.go:" | tee /dev/stderr)"
|
||||||
|
|
||||||
dco: ## dco check
|
dco: ## dco check
|
||||||
@which git-validation > /dev/null 2>/dev/null || (echo "ERROR: git-validation not found" && false)
|
@which git-validation > /dev/null 2>/dev/null || (echo "ERROR: git-validation not found" && false)
|
||||||
@ -105,15 +112,15 @@ endif
|
|||||||
|
|
||||||
ineffassign: ## run ineffassign
|
ineffassign: ## run ineffassign
|
||||||
@echo "$(WHALE) $@"
|
@echo "$(WHALE) $@"
|
||||||
@test -z "$$(ineffassign . | grep -v vendor/ | grep -v ".pb.go:" | tee /dev/stderr)"
|
@test -z "$$(ineffassign . | grep -Fv $(call FIX_PATH,'vendor/') | grep -v ".pb.go:" | tee /dev/stderr)"
|
||||||
|
|
||||||
#errcheck: ## run go errcheck
|
#errcheck: ## run go errcheck
|
||||||
# @echo "$(WHALE) $@"
|
# @echo "$(WHALE) $@"
|
||||||
# @test -z "$$(errcheck ./... | grep -v vendor/ | grep -v ".pb.go:" | tee /dev/stderr)"
|
# @test -z "$$(errcheck ./... | grep -Fv $(call FIX_PATH,'vendor/') | grep -v ".pb.go:" | tee /dev/stderr)"
|
||||||
|
|
||||||
build: ## build the go packages
|
build: ## build the go packages
|
||||||
@echo "$(WHALE) $@"
|
@echo "$(WHALE) $@"
|
||||||
@go build -i -v ${EXTRA_FLAGS} ${GO_LDFLAGS} ${GO_GCFLAGS} ${PACKAGES}
|
@go build -v ${EXTRA_FLAGS} ${GO_LDFLAGS} ${GO_GCFLAGS} ${PACKAGES}
|
||||||
|
|
||||||
test: ## run tests, except integration tests and tests that require root
|
test: ## run tests, except integration tests and tests that require root
|
||||||
@echo "$(WHALE) $@"
|
@echo "$(WHALE) $@"
|
||||||
@ -121,22 +128,32 @@ test: ## run tests, except integration tests and tests that require root
|
|||||||
|
|
||||||
root-test: ## run tests, except integration tests
|
root-test: ## run tests, except integration tests
|
||||||
@echo "$(WHALE) $@"
|
@echo "$(WHALE) $@"
|
||||||
@go test ${TESTFLAGS} ${SNAPSHOT_PACKAGES} -test.root
|
@go test ${TESTFLAGS} $(filter-out ${INTEGRATION_PACKAGE},${TEST_REQUIRES_ROOT_PACKAGES}) -test.root
|
||||||
|
|
||||||
integration: ## run integration tests
|
integration: ## run integration tests
|
||||||
@echo "$(WHALE) $@"
|
@echo "$(WHALE) $@"
|
||||||
@go test ${TESTFLAGS}
|
@go test ${TESTFLAGS} -test.root
|
||||||
|
|
||||||
|
benchmark: ## run benchmarks tests
|
||||||
|
@echo "$(WHALE) $@"
|
||||||
|
@go test ${TESTFLAGS} -bench . -run Benchmark -test.root
|
||||||
|
|
||||||
FORCE:
|
FORCE:
|
||||||
|
|
||||||
# Build a binary from a cmd.
|
# Build a binary from a cmd.
|
||||||
bin/%: cmd/% FORCE
|
bin/%: cmd/% FORCE
|
||||||
@echo "$(WHALE) $@${BINARY_SUFFIX}"
|
@echo "$(WHALE) $@${BINARY_SUFFIX}"
|
||||||
@go build -i -o $@${BINARY_SUFFIX} ${GO_LDFLAGS} ${GO_TAGS} ${GO_GCFLAGS} ./$<
|
@go build -o $@${BINARY_SUFFIX} ${GO_LDFLAGS} ${GO_TAGS} ${GO_GCFLAGS} ./$<
|
||||||
|
|
||||||
binaries: $(BINARIES) ## build binaries
|
binaries: $(BINARIES) ## build binaries
|
||||||
@echo "$(WHALE) $@"
|
@echo "$(WHALE) $@"
|
||||||
|
|
||||||
|
release: $(BINARIES)
|
||||||
|
@echo "$(WHALE) $@"
|
||||||
|
@mkdir -p releases/${RELEASE}
|
||||||
|
@cp $(BINARIES) releases/$(RELEASE)/
|
||||||
|
@cd releases/$(RELEASE) && tar -czf ../$(RELEASE).tar.gz *
|
||||||
|
|
||||||
clean: ## clean up binaries
|
clean: ## clean up binaries
|
||||||
@echo "$(WHALE) $@"
|
@echo "$(WHALE) $@"
|
||||||
@rm -f $(BINARIES)
|
@rm -f $(BINARIES)
|
||||||
@ -169,14 +186,14 @@ coverage: ## generate coverprofiles from the unit tests, except tests that requi
|
|||||||
|
|
||||||
root-coverage: ## generate coverage profiles for the unit tests
|
root-coverage: ## generate coverage profiles for the unit tests
|
||||||
@echo "$(WHALE) $@"
|
@echo "$(WHALE) $@"
|
||||||
@( for pkg in ${SNAPSHOT_PACKAGES}; do \
|
@( for pkg in $(filter-out ${INTEGRATION_PACKAGE},${TEST_REQUIRES_ROOT_PACKAGES}); do \
|
||||||
go test -i ${TESTFLAGS} -test.short -coverprofile="../../../$$pkg/coverage.txt" -covermode=atomic $$pkg -test.root || exit; \
|
go test -i ${TESTFLAGS} -test.short -coverprofile="../../../$$pkg/coverage.txt" -covermode=atomic $$pkg -test.root || exit; \
|
||||||
go test ${TESTFLAGS} -test.short -coverprofile="../../../$$pkg/coverage.txt" -covermode=atomic $$pkg -test.root || exit; \
|
go test ${TESTFLAGS} -test.short -coverprofile="../../../$$pkg/coverage.txt" -covermode=atomic $$pkg -test.root || exit; \
|
||||||
done )
|
done )
|
||||||
|
|
||||||
coverage-integration: ## generate coverprofiles from the integration tests
|
coverage-integration: ## generate coverprofiles from the integration tests
|
||||||
@echo "$(WHALE) $@"
|
@echo "$(WHALE) $@"
|
||||||
go test ${TESTFLAGS} -test.short -coverprofile="../../../${INTEGRATION_PACKAGE}/coverage.txt" -covermode=atomic ${INTEGRATION_PACKAGE}
|
go test ${TESTFLAGS} -test.short -coverprofile="../../../${INTEGRATION_PACKAGE}/coverage.txt" -covermode=atomic ${INTEGRATION_PACKAGE} -test.root
|
||||||
|
|
||||||
vendor:
|
vendor:
|
||||||
@echo "$(WHALE) $@"
|
@echo "$(WHALE) $@"
|
||||||
|
29
vendor/github.com/containerd/containerd/Protobuild.toml
generated
vendored
Normal file
@ -0,0 +1,29 @@
|
|||||||
|
version = "unstable"
|
||||||
|
generator = "gogoctrd"
|
||||||
|
plugins = ["grpc"]
|
||||||
|
|
||||||
|
# Control protoc include paths. Below are usually some good defaults, but feel
|
||||||
|
# free to try it without them if it works for your project.
|
||||||
|
[includes]
|
||||||
|
# Include paths that will be added before all others. Typically, you want to
|
||||||
|
# treat the root of the project as an include, but this may not be necessary.
|
||||||
|
before = ["."]
|
||||||
|
|
||||||
|
# Paths that should be treated as include roots in relation to the vendor
|
||||||
|
# directory. These will be calculated with the vendor directory nearest the
|
||||||
|
# target package.
|
||||||
|
vendored = ["github.com/gogo/protobuf"]
|
||||||
|
|
||||||
|
# Paths that will be added untouched to the end of the includes. We use
|
||||||
|
# `/usr/local/include` to pick up the common install location of protobuf.
|
||||||
|
# This is the default.
|
||||||
|
after = ["/usr/local/include"]
|
||||||
|
|
||||||
|
# This section maps protobuf imports to Go packages. These will become
|
||||||
|
# `-M` directives in the call to the go protobuf generator.
|
||||||
|
[packages]
|
||||||
|
"gogoproto/gogo.proto" = "github.com/gogo/protobuf/gogoproto"
|
||||||
|
"google/protobuf/any.proto" = "github.com/gogo/protobuf/types"
|
||||||
|
"google/protobuf/descriptor.proto" = "github.com/gogo/protobuf/protoc-gen-gogo/descriptor"
|
||||||
|
"google/protobuf/field_mask.proto" = "github.com/gogo/protobuf/types"
|
||||||
|
"google/protobuf/timestamp.proto" = "github.com/gogo/protobuf/types"
|
221
vendor/github.com/containerd/containerd/README.md
generated
vendored
@ -1,26 +1,147 @@
|
|||||||

|

|
||||||
|
|
||||||
|
[](https://godoc.org/github.com/containerd/containerd)
|
||||||
[](https://travis-ci.org/containerd/containerd)
|
[](https://travis-ci.org/containerd/containerd)
|
||||||
[](https://app.fossa.io/projects/git%2Bhttps%3A%2F%2Fgithub.com%2Fcontainerd%2Fcontainerd?ref=badge_shield)
|
[](https://app.fossa.io/projects/git%2Bhttps%3A%2F%2Fgithub.com%2Fcontainerd%2Fcontainerd?ref=badge_shield)
|
||||||
|
[](https://goreportcard.com/report/github.com/containerd/containerd)
|
||||||
|
|
||||||
containerd is an industry-standard container runtime with an emphasis on simplicity, robustness and portability. It is available as a daemon for Linux and Windows, which can manage the complete container lifecycle of its host system: image transfer and storage, container execution and supervision, low-level storage and network attachments, etc..
|
containerd is an industry-standard container runtime with an emphasis on simplicity, robustness and portability. It is available as a daemon for Linux and Windows, which can manage the complete container lifecycle of its host system: image transfer and storage, container execution and supervision, low-level storage and network attachments, etc.
|
||||||
|
|
||||||
containerd is designed to be embedded into a larger system, rather than being used directly by developers or end-users.
|
containerd is designed to be embedded into a larger system, rather than being used directly by developers or end-users.
|
||||||
|
|
||||||
### State of the Project
|
## Features
|
||||||
|
|
||||||
containerd currently has two active branches.
|
### Client
|
||||||
There is a [v0.2.x](https://github.com/containerd/containerd/tree/v0.2.x) branch for the current release of containerd that is being consumed by Docker and others and the master branch is the development branch for the 1.0 roadmap and feature set.
|
|
||||||
Any PR or issue that is intended for the current v0.2.x release should be tagged with the same `v0.2.x` tag.
|
|
||||||
|
|
||||||
### Communication
|
containerd offers a full client package to help you integrate containerd into your platform.
|
||||||
|
|
||||||
For async communication and long running discussions please use issues and pull requests on the github repo.
|
```go
|
||||||
This will be the best place to discuss design and implementation.
|
|
||||||
|
|
||||||
For sync communication we have a community slack with a #containerd channel that everyone is welcome to join and chat about development.
|
import "github.com/containerd/containerd"
|
||||||
|
|
||||||
**Slack:** https://dockr.ly/community
|
func main() {
|
||||||
|
client, err := containerd.New("/run/containerd/containerd.sock")
|
||||||
|
defer client.Close()
|
||||||
|
}
|
||||||
|
|
||||||
|
```
|
||||||
|
|
||||||
|
### Namespaces
|
||||||
|
|
||||||
|
Namespaces allow multiple consumers to use the same containerd without conflicting with each other. It has the benefit of sharing content but still having separation with containers and images.
|
||||||
|
|
||||||
|
To set a namespace for requests to the API:
|
||||||
|
|
||||||
|
```go
|
||||||
|
context = context.Background()
|
||||||
|
// create a context for docker
|
||||||
|
docker = namespaces.WithNamespace(context, "docker")
|
||||||
|
|
||||||
|
containerd, err := client.NewContainer(docker, "id")
|
||||||
|
```
|
||||||
|
|
||||||
|
To set a default namespace on the client:
|
||||||
|
|
||||||
|
```go
|
||||||
|
client, err := containerd.New(address, containerd.WithDefaultNamespace("docker"))
|
||||||
|
```
|
||||||
|
|
||||||
|
### Distribution
|
||||||
|
|
||||||
|
```go
|
||||||
|
// pull an image
|
||||||
|
image, err := client.Pull(context, "docker.io/library/redis:latest")
|
||||||
|
|
||||||
|
// push an image
|
||||||
|
err := client.Push(context, "docker.io/library/redis:latest", image.Target())
|
||||||
|
```
|
||||||
|
|
||||||
|
### OCI Runtime Specification
|
||||||
|
|
||||||
|
containerd fully supports the OCI runtime specification for running containers. We have built in functions to help you generate runtime specifications based on images as well as custom parameters.
|
||||||
|
|
||||||
|
```go
|
||||||
|
spec, err := containerd.GenerateSpec(containerd.WithImageConfig(context, image))
|
||||||
|
```
|
||||||
|
|
||||||
|
### Containers
|
||||||
|
|
||||||
|
In containerd, a container is a metadata object. Resources such as an OCI runtime specification, image, root filesystem, and other metadata can be attached to a container.
|
||||||
|
|
||||||
|
```go
|
||||||
|
redis, err := client.NewContainer(context, "redis-master",
|
||||||
|
containerd.WithSpec(spec),
|
||||||
|
)
|
||||||
|
defer redis.Delete(context)
|
||||||
|
```
|
||||||
|
|
||||||
|
## Root Filesystems
|
||||||
|
|
||||||
|
containerd allows you to use overlay or snapshot filesystems with your containers. It comes with builtin support for overlayfs and btrfs.
|
||||||
|
|
||||||
|
```go
|
||||||
|
// pull an image and unpack it into the configured snapshotter
|
||||||
|
image, err := client.Pull(context, "docker.io/library/redis:latest", containerd.WithPullUnpack)
|
||||||
|
|
||||||
|
// allocate a new RW root filesystem for a container based on the image
|
||||||
|
redis, err := client.NewContainer(context, "redis-master",
|
||||||
|
containerd.WithSpec(spec),
|
||||||
|
containerd.WithNewSnapshot("redis-rootfs", image),
|
||||||
|
)
|
||||||
|
|
||||||
|
// use a readonly filesystem with multiple containers
|
||||||
|
for i := 0; i < 10; i++ {
|
||||||
|
id := fmt.Sprintf("id-%d", i)
|
||||||
|
container, err := client.NewContainer(ctx, id,
|
||||||
|
containerd.WithSpec(spec),
|
||||||
|
containerd.WithNewSnapshotView(id, image),
|
||||||
|
)
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
### Tasks
|
||||||
|
|
||||||
|
Taking a container object and turning it into a runnable process on a system is done by creating a new `Task` from the container. A task represents the runnable object within containerd.
|
||||||
|
|
||||||
|
```go
|
||||||
|
// create a new task
|
||||||
|
task, err := redis.NewTask(context, containerd.Stdio)
|
||||||
|
defer task.Delete(context)
|
||||||
|
|
||||||
|
// the task is now running and has a pid that can be used to set up networking
|
||||||
|
// or other runtime settings outside of containerd
|
||||||
|
pid := task.Pid()
|
||||||
|
|
||||||
|
// start the redis-server process inside the container
|
||||||
|
err := task.Start(context)
|
||||||
|
|
||||||
|
// wait for the task to exit and get the exit status
|
||||||
|
status, err := task.Wait(context)
|
||||||
|
```
|
||||||
|
|
||||||
|
### Checkpoint and Restore
|
||||||
|
|
||||||
|
If you have [criu](https://criu.org/Main_Page) installed on your machine you can checkpoint and restore containers and their tasks. This allows you to clone and/or live migrate containers to other machines.
|
||||||
|
|
||||||
|
```go
|
||||||
|
// checkpoint the task then push it to a registry
|
||||||
|
checkpoint, err := task.Checkpoint(context, containerd.WithExit)
|
||||||
|
|
||||||
|
err := client.Push(context, "myregistry/checkpoints/redis:master", checkpoint)
|
||||||
|
|
||||||
|
// on a new machine pull the checkpoint and restore the redis container
|
||||||
|
image, err := client.Pull(context, "myregistry/checkpoints/redis:master")
|
||||||
|
|
||||||
|
checkpoint := image.Target()
|
||||||
|
|
||||||
|
redis, err = client.NewContainer(context, "redis-master", containerd.WithCheckpoint(checkpoint, "redis-rootfs"))
|
||||||
|
defer redis.Delete(context)
|
||||||
|
|
||||||
|
task, err = redis.NewTask(context, containerd.Stdio, containerd.WithTaskCheckpoint(checkpoint))
|
||||||
|
defer task.Delete(context)
|
||||||
|
|
||||||
|
err := task.Start(context)
|
||||||
|
```
|
||||||
|
|
||||||
### Developer Quick-Start
|
### Developer Quick-Start
|
||||||
|
|
||||||
@ -47,44 +168,6 @@ Vendoring of external imports uses the [`vndr` tool](https://github.com/LK4D4/vn
|
|||||||
|
|
||||||
Please refer to [RUNC.md](/RUNC.md) for the currently supported version of `runc` that is used by containerd.
|
Please refer to [RUNC.md](/RUNC.md) for the currently supported version of `runc` that is used by containerd.
|
||||||
|
|
||||||
## Features
|
|
||||||
|
|
||||||
* OCI Image Spec support
|
|
||||||
* OCI Runtime Spec support
|
|
||||||
* Image push and pull support
|
|
||||||
* Container runtime and lifecycle support
|
|
||||||
* Management of network namespaces for containers to join existing namespaces
|
|
||||||
* Multi-tenant support with CAS storage for global images
|
|
||||||
|
|
||||||
## Scope and Principles
|
|
||||||
|
|
||||||
Having a clearly defined scope of a project is important for ensuring consistency and focus.
|
|
||||||
These following criteria will be used when reviewing pull requests, features, and changes for the project before being accepted.
|
|
||||||
|
|
||||||
### Components
|
|
||||||
|
|
||||||
Components should not have tight dependencies on each other so that they are able to be used independently.
|
|
||||||
The APIs for images and containers should be designed in a way that when used together the components have a natural flow but still be useful independently.
|
|
||||||
|
|
||||||
An example for this design can be seen with the overlay filesystems and the container execution layer.
|
|
||||||
The execution layer and overlay filesystems can be used independently but if you were to use both, they share a common `Mount` struct that the filesystems produce and the execution layer consumes.
|
|
||||||
|
|
||||||
### Primitives
|
|
||||||
|
|
||||||
containerd should expose primitives to solve problems instead of building high level abstractions in the API.
|
|
||||||
A common example of this is how build would be implemented.
|
|
||||||
Instead of having a build API in containerd we should expose the lower level primitives that allow things required in build to work.
|
|
||||||
Breaking up the filesystem APIs to allow snapshots, copy functionality, and mounts allow people implementing build at the higher levels more flexibility.
|
|
||||||
|
|
||||||
### Extensibility and Defaults
|
|
||||||
|
|
||||||
For the various components in containerd there should be defined extension points where implementations can be swapped for alternatives.
|
|
||||||
The best example of this is that containerd will use `runc` from OCI as the default runtime in the execution layer but other runtimes conforming to the OCI Runtime specification can be easily added to containerd.
|
|
||||||
|
|
||||||
containerd will come with a default implementation for the various components.
|
|
||||||
These defaults will be chosen by the maintainers of the project and should not change unless better tech for that component comes out.
|
|
||||||
Additional implementations will not be accepted into the core repository and should be developed in a separate repository not maintained by the containerd maintainers.
|
|
||||||
|
|
||||||
### Releases
|
### Releases
|
||||||
|
|
||||||
containerd will be released with a 1.0 when feature complete and this version will be supported for 1 year with security and bug fixes applied and released.
|
containerd will be released with a 1.0 when feature complete and this version will be supported for 1 year with security and bug fixes applied and released.
|
||||||
@ -96,45 +179,23 @@ There is no compatibility guarantees with upgrades from two minor releases. i.e.
|
|||||||
There are no backwards compatibility guarantees with upgrades to major versions, i.e. 1.0.0 to 2.0.0.
|
There are no backwards compatibility guarantees with upgrades to major versions, i.e. 1.0.0 to 2.0.0.
|
||||||
Each major version will be supported for 1 year with bug fixes and security patches.
|
Each major version will be supported for 1 year with bug fixes and security patches.
|
||||||
|
|
||||||
### Scope
|
|
||||||
|
|
||||||
The following table specifies the various components of containerd and general features of container runtimes.
|
|
||||||
The table specifies whether or not the feature/component is in or out of scope.
|
|
||||||
|
|
||||||
| Name | Description | In/Out | Reason |
|
|
||||||
|------------------------------|--------------------------------------------------------------------------------------------------------|--------|-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
|
|
||||||
| execution | Provide an extensible execution layer for executing a container | in | Create,start, stop pause, resume exec, signal, delete |
|
|
||||||
| cow filesystem | Built in functionality for overlay, aufs, and other copy on write filesystems for containers | in | |
|
|
||||||
| distribution | Having the ability to push and pull images as well as operations on images as a first class API object | in | containerd will fully support the management and retrieval of images |
|
|
||||||
| metrics | container-level metrics, cgroup stats, and OOM events | in |
|
|
||||||
| networking | creation and management of network interfaces | out | Networking will be handled and provided to containerd via higher level systems. |
|
|
||||||
| build | Building images as a first class API | out | Build is a higher level tooling feature and can be implemented in many different ways on top of containerd |
|
|
||||||
| volumes | Volume management for external data | out | The API supports mounts, binds, etc where all volumes type systems can be built on top of containerd. |
|
|
||||||
| logging | Persisting container logs | out | Logging can be build on top of containerd because the container’s STDIO will be provided to the clients and they can persist any way they see fit. There is no io copying of container STDIO in containerd. |
|
|
||||||
|
|
||||||
|
|
||||||
containerd is scoped to a single host and makes assumptions based on that fact.
|
|
||||||
It can be used to build things like a node agent that launches containers but does not have any concepts of a distributed system.
|
|
||||||
|
|
||||||
containerd is designed to be embedded into a larger system, hence it only includes a barebone CLI (`ctr`) specifically for development and debugging purpose, with no mandate to be human-friendly, and no guarantee of interface stability over time.
|
|
||||||
|
|
||||||
Also things like service discovery are out of scope even though networking is in scope.
|
|
||||||
containerd should provide the primitives to create, add, remove, or manage network interfaces and network namespaces for a container but IP allocation, discovery, and DNS should be handled at higher layers.
|
|
||||||
|
|
||||||
### How is the scope changed?
|
|
||||||
|
|
||||||
The scope of this project is a whitelist.
|
|
||||||
If it's not mentioned as being in scope, it is out of scope.
|
|
||||||
For the scope of this project to change it requires a 100% vote from all maintainers of the project.
|
|
||||||
|
|
||||||
### Development reports.
|
### Development reports.
|
||||||
|
|
||||||
Weekly summary on the progress and what is being worked on.
|
Weekly summary on the progress and what is being worked on.
|
||||||
https://github.com/containerd/containerd/tree/master/reports
|
https://github.com/containerd/containerd/tree/master/reports
|
||||||
|
|
||||||
|
### Communication
|
||||||
|
|
||||||
|
For async communication and long running discussions please use issues and pull requests on the github repo.
|
||||||
|
This will be the best place to discuss design and implementation.
|
||||||
|
|
||||||
|
For sync communication we have a community slack with a #containerd channel that everyone is welcome to join and chat about development.
|
||||||
|
|
||||||
|
**Slack:** https://dockr.ly/community
|
||||||
|
|
||||||
## Copyright and license
|
## Copyright and license
|
||||||
|
|
||||||
Copyright © 2016 Docker, Inc. All rights reserved, except as follows. Code
|
Copyright © 2016-2017 Docker, Inc. All rights reserved, except as follows. Code
|
||||||
is released under the Apache 2.0 license. The README.md file, and files in the
|
is released under the Apache 2.0 license. The README.md file, and files in the
|
||||||
"docs" folder are licensed under the Creative Commons Attribution 4.0
|
"docs" folder are licensed under the Creative Commons Attribution 4.0
|
||||||
International License under the terms and conditions set forth in the file
|
International License under the terms and conditions set forth in the file
|
||||||
|
79
vendor/github.com/containerd/containerd/ROADMAP.md
generated
vendored
@ -1,78 +1,3 @@
|
|||||||
# containerd roadmap
|
# containerd Roadmap
|
||||||
|
|
||||||
This is a high level roadmap for the project that outlines what is currently being worked on, what comes next, and where you can help.
|
Please review the milestones on [github](https://github.com/containerd/containerd/milestones) for the updated roadmap and release information.
|
||||||
|
|
||||||
For a more up to date look please review the milestones on [github](https://github.com/containerd/containerd/milestones).
|
|
||||||
|
|
||||||
The following are the different status the various phases of development can be in:
|
|
||||||
* Not Started - no work or thinking has been done towards the goal
|
|
||||||
* In Design - design work has started for the component and you can find design documents in the `design` folder
|
|
||||||
* In Progress - design has mostly finished and development has started
|
|
||||||
* Completed - the development work has been completed
|
|
||||||
* Stable - the apis for the phase are feature complete and considered stable
|
|
||||||
|
|
||||||
We would like to follow the roadmap and develop the components one by one to completion before starting the next phase. If PRs are opened for another phase before the previous phase has been completed they will be closed as we are not ready for them at that time.
|
|
||||||
|
|
||||||
## Phase 1
|
|
||||||
|
|
||||||
**Status:** In Progress
|
|
||||||
|
|
||||||
### GRPC API
|
|
||||||
|
|
||||||
**Documents:**
|
|
||||||
|
|
||||||
We are going from a top-down design for filling out the missing pieces of containerd and the design of the API.
|
|
||||||
|
|
||||||
### Design
|
|
||||||
|
|
||||||
**Documents:**
|
|
||||||
|
|
||||||
The high level design work is needed so that the architecture of containerd stays consistent throughout the development process.
|
|
||||||
|
|
||||||
### Build & Test Process
|
|
||||||
|
|
||||||
**Documents:**
|
|
||||||
|
|
||||||
We need to have a simple build and test process for new developers to bootstrap their environments.
|
|
||||||
Because containerd will be the base of many high level systems we need to have a simple build process that does
|
|
||||||
not require high level tooling.
|
|
||||||
|
|
||||||
## Phase 2
|
|
||||||
|
|
||||||
Phase 2 includes most of the design and development work for the execution and storage layers of containerd.
|
|
||||||
It will include porting over existing "graph drivers" from Docker Engine and finding a common model for representing snapshots for layered filesystems.
|
|
||||||
|
|
||||||
This will also include moving the existing execution code to support OCI's Runtime Spec alongside the existing containerd execution code.
|
|
||||||
|
|
||||||
**Status:** In Design
|
|
||||||
|
|
||||||
### Runtime
|
|
||||||
|
|
||||||
The runtime layer is responsible for the creation of containers and their management, and supervision of the processes inside those containers.
|
|
||||||
|
|
||||||
### Storage
|
|
||||||
|
|
||||||
**Documents:** https://github.com/containerd/containerd/blob/master/design/snapshots.md
|
|
||||||
|
|
||||||
The current graph drivers were built when we only had overlay filesystems like aufs.
|
|
||||||
We forced the model to be designed around overlay filesystems and this introduced a lot of complexity for snapshotting graph drivers like btrfs and devicemapper thin-p.
|
|
||||||
Our current approach is to model our storage layer after snapshotting drivers instead of overlay drivers as we can get the same results and it's cleaner and more robust to have an overlay filesystem model snapshots than it is to have a snapshot filesystem model overlay filesystems.
|
|
||||||
|
|
||||||
## Phase 3
|
|
||||||
|
|
||||||
This phase includes getting support for the OCI Image spec built into containerd.
|
|
||||||
|
|
||||||
**Status:** Not Started
|
|
||||||
|
|
||||||
### Distribution
|
|
||||||
|
|
||||||
## Phase 4
|
|
||||||
|
|
||||||
Phase 4 involves graduating to version 1.0, and shifting the focus from features to maintenance. Graduating to 1.0 implies:
|
|
||||||
|
|
||||||
- Completing all of the above phases.
|
|
||||||
- Covering the functionalities required by a majority of container-centric platforms.
|
|
||||||
- Offering feature parity, to the extent of technical possibilities, across Linux and Windows.
|
|
||||||
- Demonstrating that containerd fulfills the requirements of at least one higher-level platform through its complete integration as an upstream.
|
|
||||||
|
|
||||||
**Status:** Not Started
|
|
||||||
|
2
vendor/github.com/containerd/containerd/RUNC.md
generated
vendored
@ -2,7 +2,7 @@ containerd is built with OCI support and with support for advanced features prov
|
|||||||
|
|
||||||
We depend on a specific runc version when dealing with advanced features. You should have a specific build for development. The current supported runc commit is:
|
We depend on a specific runc version when dealing with advanced features. You should have a specific build for development. The current supported runc commit is:
|
||||||
|
|
||||||
RUNC_COMMIT = 50401b5b4c2e01e4f1372b73a021742deeaf4e2d
|
RUNC_COMMIT = e775f0fba3ea329b8b766451c892c41a3d49594d
|
||||||
|
|
||||||
## building
|
## building
|
||||||
|
|
||||||
|
57
vendor/github.com/containerd/containerd/SCOPE.md
generated
vendored
Normal file
@ -0,0 +1,57 @@
|
|||||||
|
# Scope and Principles
|
||||||
|
|
||||||
|
Having a clearly defined scope of a project is important for ensuring consistency and focus.
|
||||||
|
The following criteria will be used when reviewing pull requests, features, and changes for the project before they are accepted.
|
||||||
|
|
||||||
|
### Components
|
||||||
|
|
||||||
|
Components should not have tight dependencies on each other so that they are able to be used independently.
|
||||||
|
The APIs for images and containers should be designed in a way that when used together the components have a natural flow but still be useful independently.
|
||||||
|
|
||||||
|
An example for this design can be seen with the overlay filesystems and the container execution layer.
|
||||||
|
The execution layer and overlay filesystems can be used independently but if you were to use both, they share a common `Mount` struct that the filesystems produce and the execution layer consumes.
|
||||||
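
To make that contract concrete, here is a minimal sketch of the idea (illustrative only; the field names are not guaranteed to match the exact definition vendored in this tree): the snapshotter side produces mounts, and the execution side consumes the very same struct.

```go
package main

import "fmt"

// Mount is an illustrative stand-in for the shared struct: how to mount a
// prepared root filesystem, independent of who produced or who consumes it.
type Mount struct {
	Type    string   // e.g. "overlay" or "bind"
	Source  string   // device or directory backing the mount
	Options []string // raw options handed to mount(2)
}

// prepareRootFS stands in for a snapshotter returning mounts for a container.
func prepareRootFS() []Mount {
	return []Mount{{
		Type:    "overlay",
		Source:  "overlay",
		Options: []string{"lowerdir=/lower", "upperdir=/upper", "workdir=/work"},
	}}
}

// runTask stands in for the execution layer consuming the same struct.
func runTask(mounts []Mount) {
	for _, m := range mounts {
		fmt.Printf("mount -t %s %s -o %v\n", m.Type, m.Source, m.Options)
	}
}

func main() {
	runTask(prepareRootFS())
}
```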
|
|
||||||
|
### Primitives
|
||||||
|
|
||||||
|
containerd should expose primitives to solve problems instead of building high level abstractions in the API.
|
||||||
|
A common example of this is how build would be implemented.
|
||||||
|
Instead of having a build API in containerd we should expose the lower level primitives that allow things required in build to work.
|
||||||
|
Breaking up the filesystem APIs to allow snapshots, copy functionality, and mounts gives people implementing build at the higher levels more flexibility.
|
||||||
|
|
||||||
|
### Extensibility and Defaults
|
||||||
|
|
||||||
|
For the various components in containerd there should be defined extension points where implementations can be swapped for alternatives.
|
||||||
|
The best example of this is that containerd will use `runc` from OCI as the default runtime in the execution layer but other runtimes conforming to the OCI Runtime specification can be easily added to containerd.
|
||||||
|
|
||||||
|
containerd will come with a default implementation for the various components.
|
||||||
|
These defaults will be chosen by the maintainers of the project and should not change unless better tech for that component comes out.
|
||||||
|
Additional implementations will not be accepted into the core repository and should be developed in a separate repository not maintained by the containerd maintainers.
|
||||||
|
|
||||||
|
|
||||||
|
## Scope
|
||||||
|
|
||||||
|
The following table specifies the various components of containerd and general features of container runtimes.
|
||||||
|
The table specifies whether or not the feature/component is in or out of scope.
|
||||||
|
|
||||||
|
| Name | Description | In/Out | Reason |
|
||||||
|
|------------------------------|--------------------------------------------------------------------------------------------------------|--------|-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
|
||||||
|
| execution                    | Provide an extensible execution layer for executing a container                                         | in     | Create, start, stop, pause, resume, exec, signal, delete                                                                                                                                                        |
|
||||||
|
| cow filesystem | Built in functionality for overlay, aufs, and other copy on write filesystems for containers | in | |
|
||||||
|
| distribution | Having the ability to push and pull images as well as operations on images as a first class API object | in | containerd will fully support the management and retrieval of images |
|
||||||
|
| metrics                      | container-level metrics, cgroup stats, and OOM events                                                   | in     |                                                                                                                                                                                                                 |
|
||||||
|
| networking | creation and management of network interfaces | out | Networking will be handled and provided to containerd via higher level systems. |
|
||||||
|
| build | Building images as a first class API | out | Build is a higher level tooling feature and can be implemented in many different ways on top of containerd |
|
||||||
|
| volumes                      | Volume management for external data                                                                     | out    | The API supports mounts, binds, etc., where all volume type systems can be built on top of containerd.                                                                                                          |
|
||||||
|
| logging                      | Persisting container logs                                                                               | out    | Logging can be built on top of containerd because the container’s STDIO will be provided to the clients and they can persist any way they see fit. There is no I/O copying of container STDIO in containerd.  |
|
||||||
|
|
||||||
|
|
||||||
|
containerd is scoped to a single host and makes assumptions based on that fact.
|
||||||
|
It can be used to build things like a node agent that launches containers but does not have any concepts of a distributed system.
|
||||||
|
|
||||||
|
containerd is designed to be embedded into a larger system, hence it only includes a barebones CLI (`ctr`) specifically for development and debugging purposes, with no mandate to be human-friendly, and no guarantee of interface stability over time.
|
||||||
|
|
||||||
|
### How is the scope changed?
|
||||||
|
|
||||||
|
The scope of this project is a whitelist.
|
||||||
|
If it's not mentioned as being in scope, it is out of scope.
|
||||||
|
For the scope of this project to change it requires a 100% vote from all maintainers of the project.
|
@ -1,12 +1,12 @@
|
|||||||
// Code generated by protoc-gen-gogo.
|
// Code generated by protoc-gen-gogo.
|
||||||
// source: github.com/containerd/containerd/api/services/containers/containers.proto
|
// source: github.com/containerd/containerd/api/services/containers/v1/containers.proto
|
||||||
// DO NOT EDIT!
|
// DO NOT EDIT!
|
||||||
|
|
||||||
/*
|
/*
|
||||||
Package containers is a generated protocol buffer package.
|
Package containers is a generated protocol buffer package.
|
||||||
|
|
||||||
It is generated from these files:
|
It is generated from these files:
|
||||||
github.com/containerd/containerd/api/services/containers/containers.proto
|
github.com/containerd/containerd/api/services/containers/v1/containers.proto
|
||||||
|
|
||||||
It has these top-level messages:
|
It has these top-level messages:
|
||||||
Container
|
Container
|
||||||
@ -30,7 +30,7 @@ import google_protobuf1 "github.com/gogo/protobuf/types"
|
|||||||
import google_protobuf2 "github.com/golang/protobuf/ptypes/empty"
|
import google_protobuf2 "github.com/golang/protobuf/ptypes/empty"
|
||||||
import google_protobuf3 "github.com/gogo/protobuf/types"
|
import google_protobuf3 "github.com/gogo/protobuf/types"
|
||||||
import _ "github.com/gogo/protobuf/types"
|
import _ "github.com/gogo/protobuf/types"
|
||||||
import _ "github.com/containerd/containerd/api/types/descriptor"
|
import _ "github.com/containerd/containerd/api/types"
|
||||||
|
|
||||||
import time "time"
|
import time "time"
|
||||||
|
|
||||||
@ -75,9 +75,11 @@ type Container struct {
|
|||||||
// If this field is updated, the spec and rootfs need to be updated as well.
|
// If this field is updated, the spec and rootfs need to be updated as well.
|
||||||
Image string `protobuf:"bytes,3,opt,name=image,proto3" json:"image,omitempty"`
|
Image string `protobuf:"bytes,3,opt,name=image,proto3" json:"image,omitempty"`
|
||||||
// Runtime specifies which runtime to use for executing this container.
|
// Runtime specifies which runtime to use for executing this container.
|
||||||
Runtime string `protobuf:"bytes,4,opt,name=runtime,proto3" json:"runtime,omitempty"`
|
Runtime *Container_Runtime `protobuf:"bytes,4,opt,name=runtime" json:"runtime,omitempty"`
|
||||||
// Spec to be used when creating the container. This is runtime specific.
|
// Spec to be used when creating the container. This is runtime specific.
|
||||||
Spec *google_protobuf1.Any `protobuf:"bytes,6,opt,name=spec" json:"spec,omitempty"`
|
Spec *google_protobuf1.Any `protobuf:"bytes,5,opt,name=spec" json:"spec,omitempty"`
|
||||||
|
// Snapshotter specifies the snapshotter name used for rootfs
|
||||||
|
Snapshotter string `protobuf:"bytes,6,opt,name=snapshotter,proto3" json:"snapshotter,omitempty"`
|
||||||
// RootFS specifies the snapshot key to use for the container's root
|
// RootFS specifies the snapshot key to use for the container's root
|
||||||
// filesystem. When starting a task from this container, a caller should
|
// filesystem. When starting a task from this container, a caller should
|
||||||
// look up the mounts from the snapshot service and include those on the
|
// look up the mounts from the snapshot service and include those on the
|
||||||
@ -87,7 +89,9 @@ type Container struct {
|
|||||||
//
|
//
|
||||||
// This field may be updated.
|
// This field may be updated.
|
||||||
RootFS string `protobuf:"bytes,7,opt,name=rootfs,proto3" json:"rootfs,omitempty"`
|
RootFS string `protobuf:"bytes,7,opt,name=rootfs,proto3" json:"rootfs,omitempty"`
|
||||||
|
// CreatedAt is the time the container was first created.
|
||||||
CreatedAt time.Time `protobuf:"bytes,8,opt,name=created_at,json=createdAt,stdtime" json:"created_at"`
|
CreatedAt time.Time `protobuf:"bytes,8,opt,name=created_at,json=createdAt,stdtime" json:"created_at"`
|
||||||
|
// UpdatedAt is the last time the container was mutated.
|
||||||
UpdatedAt time.Time `protobuf:"bytes,9,opt,name=updated_at,json=updatedAt,stdtime" json:"updated_at"`
|
UpdatedAt time.Time `protobuf:"bytes,9,opt,name=updated_at,json=updatedAt,stdtime" json:"updated_at"`
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -95,6 +99,17 @@ func (m *Container) Reset() { *m = Container{} }
|
|||||||
func (*Container) ProtoMessage() {}
|
func (*Container) ProtoMessage() {}
|
||||||
func (*Container) Descriptor() ([]byte, []int) { return fileDescriptorContainers, []int{0} }
|
func (*Container) Descriptor() ([]byte, []int) { return fileDescriptorContainers, []int{0} }
|
||||||
|
|
||||||
|
type Container_Runtime struct {
|
||||||
|
// Name is the name of the runtime.
|
||||||
|
Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
|
||||||
|
// Options specify additional runtime initialization options.
|
||||||
|
Options *google_protobuf1.Any `protobuf:"bytes,2,opt,name=options" json:"options,omitempty"`
|
||||||
|
}
|
||||||
|
|
||||||
|
func (m *Container_Runtime) Reset() { *m = Container_Runtime{} }
|
||||||
|
func (*Container_Runtime) ProtoMessage() {}
|
||||||
|
func (*Container_Runtime) Descriptor() ([]byte, []int) { return fileDescriptorContainers, []int{0, 1} }
|
||||||
|
|
||||||
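
For orientation, a hedged sketch (not part of the generated file) of how a caller might populate the reworked message: `Runtime` is now a nested `Container_Runtime` message rather than a plain string, and a `Snapshotter` field has been added. The dial target, runtime name, snapshotter value, and the assumption that the message carries an `ID` field are placeholders for illustration only; a real caller would also attach the namespace metadata containerd expects.

```go
package main

import (
	"context"
	"log"

	containersapi "github.com/containerd/containerd/api/services/containers/v1"
	"google.golang.org/grpc"
)

func main() {
	// Dialing details are simplified; in practice this would be containerd's
	// unix socket rather than a TCP placeholder address.
	conn, err := grpc.Dial("localhost:1234", grpc.WithInsecure())
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()

	client := containersapi.NewContainersClient(conn)

	// Runtime is a nested message now; Options (*Any) and Spec are left nil
	// here for brevity.
	_, err = client.Create(context.Background(), &containersapi.CreateContainerRequest{
		Container: containersapi.Container{
			ID:          "redis-master",
			Image:       "docker.io/library/redis:latest",
			Runtime:     &containersapi.Container_Runtime{Name: "linux"},
			Snapshotter: "overlayfs",
			RootFS:      "redis-rootfs",
		},
	})
	if err != nil {
		log.Fatal(err)
	}
}
```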
type GetContainerRequest struct {
|
type GetContainerRequest struct {
|
||||||
ID string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"`
|
ID string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"`
|
||||||
}
|
}
|
||||||
@ -112,7 +127,17 @@ func (*GetContainerResponse) ProtoMessage() {}
|
|||||||
func (*GetContainerResponse) Descriptor() ([]byte, []int) { return fileDescriptorContainers, []int{2} }
|
func (*GetContainerResponse) Descriptor() ([]byte, []int) { return fileDescriptorContainers, []int{2} }
|
||||||
|
|
||||||
type ListContainersRequest struct {
|
type ListContainersRequest struct {
|
||||||
Filter string `protobuf:"bytes,1,opt,name=filter,proto3" json:"filter,omitempty"`
|
// Filters contains one or more filters using the syntax defined in the
|
||||||
|
// containerd filter package.
|
||||||
|
//
|
||||||
|
// The returned result will be those that match any of the provided
|
||||||
|
// filters. Expanded, containers that match the following will be
|
||||||
|
// returned:
|
||||||
|
//
|
||||||
|
// filters[0] or filters[1] or ... or filters[n-1] or filters[n]
|
||||||
|
//
|
||||||
|
// If filters is zero-length or nil, all items will be returned.
|
||||||
|
Filters []string `protobuf:"bytes,1,rep,name=filters" json:"filters,omitempty"`
|
||||||
}
|
}
|
||||||
|
|
||||||
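
The singular `Filter` string has become a repeated `Filters` field, and the comments above say the result is the union of containers matching any one of the provided filters. A hedged sketch of calling this through the generated client follows; the filter expressions are placeholders for the containerd filter syntax, and the response is assumed to expose a `Containers` slice.

```go
// Assumes: containersapi "github.com/containerd/containerd/api/services/containers/v1".
// Containers matching any one of the expressions are returned (logical OR);
// an empty or nil Filters slice returns everything.
func listRedisContainers(ctx context.Context, client containersapi.ContainersClient) ([]containersapi.Container, error) {
	resp, err := client.List(ctx, &containersapi.ListContainersRequest{
		Filters: []string{`id==redis-master`, `image==docker.io/library/redis:latest`},
	})
	if err != nil {
		return nil, err
	}
	return resp.Containers, nil
}
```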
func (m *ListContainersRequest) Reset() { *m = ListContainersRequest{} }
|
func (m *ListContainersRequest) Reset() { *m = ListContainersRequest{} }
|
||||||
@ -183,16 +208,17 @@ func (*DeleteContainerRequest) ProtoMessage() {}
|
|||||||
func (*DeleteContainerRequest) Descriptor() ([]byte, []int) { return fileDescriptorContainers, []int{9} }
|
func (*DeleteContainerRequest) Descriptor() ([]byte, []int) { return fileDescriptorContainers, []int{9} }
|
||||||
|
|
||||||
func init() {
|
func init() {
|
||||||
proto.RegisterType((*Container)(nil), "containerd.v1.Container")
|
proto.RegisterType((*Container)(nil), "containerd.services.containers.v1.Container")
|
||||||
proto.RegisterType((*GetContainerRequest)(nil), "containerd.v1.GetContainerRequest")
|
proto.RegisterType((*Container_Runtime)(nil), "containerd.services.containers.v1.Container.Runtime")
|
||||||
proto.RegisterType((*GetContainerResponse)(nil), "containerd.v1.GetContainerResponse")
|
proto.RegisterType((*GetContainerRequest)(nil), "containerd.services.containers.v1.GetContainerRequest")
|
||||||
proto.RegisterType((*ListContainersRequest)(nil), "containerd.v1.ListContainersRequest")
|
proto.RegisterType((*GetContainerResponse)(nil), "containerd.services.containers.v1.GetContainerResponse")
|
||||||
proto.RegisterType((*ListContainersResponse)(nil), "containerd.v1.ListContainersResponse")
|
proto.RegisterType((*ListContainersRequest)(nil), "containerd.services.containers.v1.ListContainersRequest")
|
||||||
proto.RegisterType((*CreateContainerRequest)(nil), "containerd.v1.CreateContainerRequest")
|
proto.RegisterType((*ListContainersResponse)(nil), "containerd.services.containers.v1.ListContainersResponse")
|
||||||
proto.RegisterType((*CreateContainerResponse)(nil), "containerd.v1.CreateContainerResponse")
|
proto.RegisterType((*CreateContainerRequest)(nil), "containerd.services.containers.v1.CreateContainerRequest")
|
||||||
proto.RegisterType((*UpdateContainerRequest)(nil), "containerd.v1.UpdateContainerRequest")
|
proto.RegisterType((*CreateContainerResponse)(nil), "containerd.services.containers.v1.CreateContainerResponse")
|
||||||
proto.RegisterType((*UpdateContainerResponse)(nil), "containerd.v1.UpdateContainerResponse")
|
proto.RegisterType((*UpdateContainerRequest)(nil), "containerd.services.containers.v1.UpdateContainerRequest")
|
||||||
proto.RegisterType((*DeleteContainerRequest)(nil), "containerd.v1.DeleteContainerRequest")
|
proto.RegisterType((*UpdateContainerResponse)(nil), "containerd.services.containers.v1.UpdateContainerResponse")
|
||||||
|
proto.RegisterType((*DeleteContainerRequest)(nil), "containerd.services.containers.v1.DeleteContainerRequest")
|
||||||
}
|
}
|
||||||
|
|
||||||
// Reference imports to suppress errors if they are not otherwise used.
|
// Reference imports to suppress errors if they are not otherwise used.
|
||||||
@ -223,7 +249,7 @@ func NewContainersClient(cc *grpc.ClientConn) ContainersClient {
|
|||||||
|
|
||||||
func (c *containersClient) Get(ctx context.Context, in *GetContainerRequest, opts ...grpc.CallOption) (*GetContainerResponse, error) {
|
func (c *containersClient) Get(ctx context.Context, in *GetContainerRequest, opts ...grpc.CallOption) (*GetContainerResponse, error) {
|
||||||
out := new(GetContainerResponse)
|
out := new(GetContainerResponse)
|
||||||
err := grpc.Invoke(ctx, "/containerd.v1.Containers/Get", in, out, c.cc, opts...)
|
err := grpc.Invoke(ctx, "/containerd.services.containers.v1.Containers/Get", in, out, c.cc, opts...)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
@ -232,7 +258,7 @@ func (c *containersClient) Get(ctx context.Context, in *GetContainerRequest, opt
|
|||||||
|
|
||||||
func (c *containersClient) List(ctx context.Context, in *ListContainersRequest, opts ...grpc.CallOption) (*ListContainersResponse, error) {
|
func (c *containersClient) List(ctx context.Context, in *ListContainersRequest, opts ...grpc.CallOption) (*ListContainersResponse, error) {
|
||||||
out := new(ListContainersResponse)
|
out := new(ListContainersResponse)
|
||||||
err := grpc.Invoke(ctx, "/containerd.v1.Containers/List", in, out, c.cc, opts...)
|
err := grpc.Invoke(ctx, "/containerd.services.containers.v1.Containers/List", in, out, c.cc, opts...)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
@ -241,7 +267,7 @@ func (c *containersClient) List(ctx context.Context, in *ListContainersRequest,
|
|||||||
|
|
||||||
func (c *containersClient) Create(ctx context.Context, in *CreateContainerRequest, opts ...grpc.CallOption) (*CreateContainerResponse, error) {
|
func (c *containersClient) Create(ctx context.Context, in *CreateContainerRequest, opts ...grpc.CallOption) (*CreateContainerResponse, error) {
|
||||||
out := new(CreateContainerResponse)
|
out := new(CreateContainerResponse)
|
||||||
err := grpc.Invoke(ctx, "/containerd.v1.Containers/Create", in, out, c.cc, opts...)
|
err := grpc.Invoke(ctx, "/containerd.services.containers.v1.Containers/Create", in, out, c.cc, opts...)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
@ -250,7 +276,7 @@ func (c *containersClient) Create(ctx context.Context, in *CreateContainerReques
|
|||||||
|
|
||||||
func (c *containersClient) Update(ctx context.Context, in *UpdateContainerRequest, opts ...grpc.CallOption) (*UpdateContainerResponse, error) {
|
func (c *containersClient) Update(ctx context.Context, in *UpdateContainerRequest, opts ...grpc.CallOption) (*UpdateContainerResponse, error) {
|
||||||
out := new(UpdateContainerResponse)
|
out := new(UpdateContainerResponse)
|
||||||
err := grpc.Invoke(ctx, "/containerd.v1.Containers/Update", in, out, c.cc, opts...)
|
err := grpc.Invoke(ctx, "/containerd.services.containers.v1.Containers/Update", in, out, c.cc, opts...)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
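
`Update` pairs the container with an `UpdateMask` (defined further down in this file) so callers can apply partial updates. A hedged sketch, assuming the mask is a gogo/protobuf `types.FieldMask`, limiting the change to the image field:

```go
// Assumes: containersapi "github.com/containerd/containerd/api/services/containers/v1"
// and types "github.com/gogo/protobuf/types". Only the fields named in the
// mask are meant to be applied; everything else is left as stored.
func updateImage(ctx context.Context, client containersapi.ContainersClient, id, image string) error {
	_, err := client.Update(ctx, &containersapi.UpdateContainerRequest{
		Container:  containersapi.Container{ID: id, Image: image},
		UpdateMask: &types.FieldMask{Paths: []string{"image"}},
	})
	return err
}
```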
@ -259,7 +285,7 @@ func (c *containersClient) Update(ctx context.Context, in *UpdateContainerReques
|
|||||||
|
|
||||||
func (c *containersClient) Delete(ctx context.Context, in *DeleteContainerRequest, opts ...grpc.CallOption) (*google_protobuf2.Empty, error) {
|
func (c *containersClient) Delete(ctx context.Context, in *DeleteContainerRequest, opts ...grpc.CallOption) (*google_protobuf2.Empty, error) {
|
||||||
out := new(google_protobuf2.Empty)
|
out := new(google_protobuf2.Empty)
|
||||||
err := grpc.Invoke(ctx, "/containerd.v1.Containers/Delete", in, out, c.cc, opts...)
|
err := grpc.Invoke(ctx, "/containerd.services.containers.v1.Containers/Delete", in, out, c.cc, opts...)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
@ -290,7 +316,7 @@ func _Containers_Get_Handler(srv interface{}, ctx context.Context, dec func(inte
|
|||||||
}
|
}
|
||||||
info := &grpc.UnaryServerInfo{
|
info := &grpc.UnaryServerInfo{
|
||||||
Server: srv,
|
Server: srv,
|
||||||
FullMethod: "/containerd.v1.Containers/Get",
|
FullMethod: "/containerd.services.containers.v1.Containers/Get",
|
||||||
}
|
}
|
||||||
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
|
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
|
||||||
return srv.(ContainersServer).Get(ctx, req.(*GetContainerRequest))
|
return srv.(ContainersServer).Get(ctx, req.(*GetContainerRequest))
|
||||||
@ -308,7 +334,7 @@ func _Containers_List_Handler(srv interface{}, ctx context.Context, dec func(int
|
|||||||
}
|
}
|
||||||
info := &grpc.UnaryServerInfo{
|
info := &grpc.UnaryServerInfo{
|
||||||
Server: srv,
|
Server: srv,
|
||||||
FullMethod: "/containerd.v1.Containers/List",
|
FullMethod: "/containerd.services.containers.v1.Containers/List",
|
||||||
}
|
}
|
||||||
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
|
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
|
||||||
return srv.(ContainersServer).List(ctx, req.(*ListContainersRequest))
|
return srv.(ContainersServer).List(ctx, req.(*ListContainersRequest))
|
||||||
@ -326,7 +352,7 @@ func _Containers_Create_Handler(srv interface{}, ctx context.Context, dec func(i
|
|||||||
}
|
}
|
||||||
info := &grpc.UnaryServerInfo{
|
info := &grpc.UnaryServerInfo{
|
||||||
Server: srv,
|
Server: srv,
|
||||||
FullMethod: "/containerd.v1.Containers/Create",
|
FullMethod: "/containerd.services.containers.v1.Containers/Create",
|
||||||
}
|
}
|
||||||
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
|
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
|
||||||
return srv.(ContainersServer).Create(ctx, req.(*CreateContainerRequest))
|
return srv.(ContainersServer).Create(ctx, req.(*CreateContainerRequest))
|
||||||
@ -344,7 +370,7 @@ func _Containers_Update_Handler(srv interface{}, ctx context.Context, dec func(i
|
|||||||
}
|
}
|
||||||
info := &grpc.UnaryServerInfo{
|
info := &grpc.UnaryServerInfo{
|
||||||
Server: srv,
|
Server: srv,
|
||||||
FullMethod: "/containerd.v1.Containers/Update",
|
FullMethod: "/containerd.services.containers.v1.Containers/Update",
|
||||||
}
|
}
|
||||||
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
|
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
|
||||||
return srv.(ContainersServer).Update(ctx, req.(*UpdateContainerRequest))
|
return srv.(ContainersServer).Update(ctx, req.(*UpdateContainerRequest))
|
||||||
@ -362,7 +388,7 @@ func _Containers_Delete_Handler(srv interface{}, ctx context.Context, dec func(i
|
|||||||
}
|
}
|
||||||
info := &grpc.UnaryServerInfo{
|
info := &grpc.UnaryServerInfo{
|
||||||
Server: srv,
|
Server: srv,
|
||||||
FullMethod: "/containerd.v1.Containers/Delete",
|
FullMethod: "/containerd.services.containers.v1.Containers/Delete",
|
||||||
}
|
}
|
||||||
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
|
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
|
||||||
return srv.(ContainersServer).Delete(ctx, req.(*DeleteContainerRequest))
|
return srv.(ContainersServer).Delete(ctx, req.(*DeleteContainerRequest))
|
||||||
@ -371,7 +397,7 @@ func _Containers_Delete_Handler(srv interface{}, ctx context.Context, dec func(i
|
|||||||
}
|
}
|
||||||
|
|
||||||
var _Containers_serviceDesc = grpc.ServiceDesc{
|
var _Containers_serviceDesc = grpc.ServiceDesc{
|
||||||
ServiceName: "containerd.v1.Containers",
|
ServiceName: "containerd.services.containers.v1.Containers",
|
||||||
HandlerType: (*ContainersServer)(nil),
|
HandlerType: (*ContainersServer)(nil),
|
||||||
Methods: []grpc.MethodDesc{
|
Methods: []grpc.MethodDesc{
|
||||||
{
|
{
|
||||||
@ -396,7 +422,7 @@ var _Containers_serviceDesc = grpc.ServiceDesc{
|
|||||||
},
|
},
|
||||||
},
|
},
|
||||||
Streams: []grpc.StreamDesc{},
|
Streams: []grpc.StreamDesc{},
|
||||||
Metadata: "github.com/containerd/containerd/api/services/containers/containers.proto",
|
Metadata: "github.com/containerd/containerd/api/services/containers/v1/containers.proto",
|
||||||
}
|
}
|
||||||
|
|
||||||
func (m *Container) Marshal() (dAtA []byte, err error) {
|
func (m *Container) Marshal() (dAtA []byte, err error) {
|
||||||
@ -443,22 +469,32 @@ func (m *Container) MarshalTo(dAtA []byte) (int, error) {
|
|||||||
i = encodeVarintContainers(dAtA, i, uint64(len(m.Image)))
|
i = encodeVarintContainers(dAtA, i, uint64(len(m.Image)))
|
||||||
i += copy(dAtA[i:], m.Image)
|
i += copy(dAtA[i:], m.Image)
|
||||||
}
|
}
|
||||||
if len(m.Runtime) > 0 {
|
if m.Runtime != nil {
|
||||||
dAtA[i] = 0x22
|
dAtA[i] = 0x22
|
||||||
i++
|
i++
|
||||||
i = encodeVarintContainers(dAtA, i, uint64(len(m.Runtime)))
|
i = encodeVarintContainers(dAtA, i, uint64(m.Runtime.Size()))
|
||||||
i += copy(dAtA[i:], m.Runtime)
|
n1, err := m.Runtime.MarshalTo(dAtA[i:])
|
||||||
}
|
|
||||||
if m.Spec != nil {
|
|
||||||
dAtA[i] = 0x32
|
|
||||||
i++
|
|
||||||
i = encodeVarintContainers(dAtA, i, uint64(m.Spec.Size()))
|
|
||||||
n1, err := m.Spec.MarshalTo(dAtA[i:])
|
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return 0, err
|
return 0, err
|
||||||
}
|
}
|
||||||
i += n1
|
i += n1
|
||||||
}
|
}
|
||||||
|
if m.Spec != nil {
|
||||||
|
dAtA[i] = 0x2a
|
||||||
|
i++
|
||||||
|
i = encodeVarintContainers(dAtA, i, uint64(m.Spec.Size()))
|
||||||
|
n2, err := m.Spec.MarshalTo(dAtA[i:])
|
||||||
|
if err != nil {
|
||||||
|
return 0, err
|
||||||
|
}
|
||||||
|
i += n2
|
||||||
|
}
|
||||||
|
if len(m.Snapshotter) > 0 {
|
||||||
|
dAtA[i] = 0x32
|
||||||
|
i++
|
||||||
|
i = encodeVarintContainers(dAtA, i, uint64(len(m.Snapshotter)))
|
||||||
|
i += copy(dAtA[i:], m.Snapshotter)
|
||||||
|
}
|
||||||
if len(m.RootFS) > 0 {
|
if len(m.RootFS) > 0 {
|
||||||
dAtA[i] = 0x3a
|
dAtA[i] = 0x3a
|
||||||
i++
|
i++
|
||||||
@ -468,19 +504,53 @@ func (m *Container) MarshalTo(dAtA []byte) (int, error) {
|
|||||||
dAtA[i] = 0x42
|
dAtA[i] = 0x42
|
||||||
i++
|
i++
|
||||||
i = encodeVarintContainers(dAtA, i, uint64(github_com_gogo_protobuf_types.SizeOfStdTime(m.CreatedAt)))
|
i = encodeVarintContainers(dAtA, i, uint64(github_com_gogo_protobuf_types.SizeOfStdTime(m.CreatedAt)))
|
||||||
n2, err := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.CreatedAt, dAtA[i:])
|
n3, err := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.CreatedAt, dAtA[i:])
|
||||||
if err != nil {
|
|
||||||
return 0, err
|
|
||||||
}
|
|
||||||
i += n2
|
|
||||||
dAtA[i] = 0x4a
|
|
||||||
i++
|
|
||||||
i = encodeVarintContainers(dAtA, i, uint64(github_com_gogo_protobuf_types.SizeOfStdTime(m.UpdatedAt)))
|
|
||||||
n3, err := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.UpdatedAt, dAtA[i:])
|
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return 0, err
|
return 0, err
|
||||||
}
|
}
|
||||||
i += n3
|
i += n3
|
||||||
|
dAtA[i] = 0x4a
|
||||||
|
i++
|
||||||
|
i = encodeVarintContainers(dAtA, i, uint64(github_com_gogo_protobuf_types.SizeOfStdTime(m.UpdatedAt)))
|
||||||
|
n4, err := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.UpdatedAt, dAtA[i:])
|
||||||
|
if err != nil {
|
||||||
|
return 0, err
|
||||||
|
}
|
||||||
|
i += n4
|
||||||
|
return i, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (m *Container_Runtime) Marshal() (dAtA []byte, err error) {
|
||||||
|
size := m.Size()
|
||||||
|
dAtA = make([]byte, size)
|
||||||
|
n, err := m.MarshalTo(dAtA)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
return dAtA[:n], nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (m *Container_Runtime) MarshalTo(dAtA []byte) (int, error) {
|
||||||
|
var i int
|
||||||
|
_ = i
|
||||||
|
var l int
|
||||||
|
_ = l
|
||||||
|
if len(m.Name) > 0 {
|
||||||
|
dAtA[i] = 0xa
|
||||||
|
i++
|
||||||
|
i = encodeVarintContainers(dAtA, i, uint64(len(m.Name)))
|
||||||
|
i += copy(dAtA[i:], m.Name)
|
||||||
|
}
|
||||||
|
if m.Options != nil {
|
||||||
|
dAtA[i] = 0x12
|
||||||
|
i++
|
||||||
|
i = encodeVarintContainers(dAtA, i, uint64(m.Options.Size()))
|
||||||
|
n5, err := m.Options.MarshalTo(dAtA[i:])
|
||||||
|
if err != nil {
|
||||||
|
return 0, err
|
||||||
|
}
|
||||||
|
i += n5
|
||||||
|
}
|
||||||
return i, nil
|
return i, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -526,11 +596,11 @@ func (m *GetContainerResponse) MarshalTo(dAtA []byte) (int, error) {
|
|||||||
dAtA[i] = 0xa
|
dAtA[i] = 0xa
|
||||||
i++
|
i++
|
||||||
i = encodeVarintContainers(dAtA, i, uint64(m.Container.Size()))
|
i = encodeVarintContainers(dAtA, i, uint64(m.Container.Size()))
|
||||||
n4, err := m.Container.MarshalTo(dAtA[i:])
|
n6, err := m.Container.MarshalTo(dAtA[i:])
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return 0, err
|
return 0, err
|
||||||
}
|
}
|
||||||
i += n4
|
i += n6
|
||||||
return i, nil
|
return i, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -549,11 +619,20 @@ func (m *ListContainersRequest) MarshalTo(dAtA []byte) (int, error) {
|
|||||||
_ = i
|
_ = i
|
||||||
var l int
|
var l int
|
||||||
_ = l
|
_ = l
|
||||||
if len(m.Filter) > 0 {
|
if len(m.Filters) > 0 {
|
||||||
|
for _, s := range m.Filters {
|
||||||
dAtA[i] = 0xa
|
dAtA[i] = 0xa
|
||||||
i++
|
i++
|
||||||
i = encodeVarintContainers(dAtA, i, uint64(len(m.Filter)))
|
l = len(s)
|
||||||
i += copy(dAtA[i:], m.Filter)
|
for l >= 1<<7 {
|
||||||
|
dAtA[i] = uint8(uint64(l)&0x7f | 0x80)
|
||||||
|
l >>= 7
|
||||||
|
i++
|
||||||
|
}
|
||||||
|
dAtA[i] = uint8(l)
|
||||||
|
i++
|
||||||
|
i += copy(dAtA[i:], s)
|
||||||
|
}
|
||||||
}
|
}
|
||||||
return i, nil
|
return i, nil
|
||||||
}
|
}
|
||||||
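
The rewritten loop above emits each filter string as the field-1 tag byte (0x0a), a base-128 varint holding the string length, and then the raw bytes. As a standalone reference, here is a sketch of the same varint length encoding: seven payload bits per byte, with the high bit set on every byte except the last.

```go
// appendVarint encodes v as a protobuf base-128 varint and appends it to buf,
// mirroring the inline length-prefix loop in the generated MarshalTo above.
func appendVarint(buf []byte, v uint64) []byte {
	for v >= 1<<7 {
		buf = append(buf, byte(v&0x7f|0x80))
		v >>= 7
	}
	return append(buf, byte(v))
}
```

For example, a 300-byte filter string would be prefixed with the two bytes 0xac 0x02.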
@ -606,11 +685,11 @@ func (m *CreateContainerRequest) MarshalTo(dAtA []byte) (int, error) {
|
|||||||
dAtA[i] = 0xa
|
dAtA[i] = 0xa
|
||||||
i++
|
i++
|
||||||
i = encodeVarintContainers(dAtA, i, uint64(m.Container.Size()))
|
i = encodeVarintContainers(dAtA, i, uint64(m.Container.Size()))
|
||||||
n5, err := m.Container.MarshalTo(dAtA[i:])
|
n7, err := m.Container.MarshalTo(dAtA[i:])
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return 0, err
|
return 0, err
|
||||||
}
|
}
|
||||||
i += n5
|
i += n7
|
||||||
return i, nil
|
return i, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -632,11 +711,11 @@ func (m *CreateContainerResponse) MarshalTo(dAtA []byte) (int, error) {
|
|||||||
dAtA[i] = 0xa
|
dAtA[i] = 0xa
|
||||||
i++
|
i++
|
||||||
i = encodeVarintContainers(dAtA, i, uint64(m.Container.Size()))
|
i = encodeVarintContainers(dAtA, i, uint64(m.Container.Size()))
|
||||||
n6, err := m.Container.MarshalTo(dAtA[i:])
|
n8, err := m.Container.MarshalTo(dAtA[i:])
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return 0, err
|
return 0, err
|
||||||
}
|
}
|
||||||
i += n6
|
i += n8
|
||||||
return i, nil
|
return i, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -658,20 +737,20 @@ func (m *UpdateContainerRequest) MarshalTo(dAtA []byte) (int, error) {
dAtA[i] = 0xa
dAtA[i] = 0xa
i++
i++
i = encodeVarintContainers(dAtA, i, uint64(m.Container.Size()))
i = encodeVarintContainers(dAtA, i, uint64(m.Container.Size()))
n7, err := m.Container.MarshalTo(dAtA[i:])
n9, err := m.Container.MarshalTo(dAtA[i:])
if err != nil {
if err != nil {
return 0, err
return 0, err
}
}
i += n7
i += n9
if m.UpdateMask != nil {
if m.UpdateMask != nil {
dAtA[i] = 0x12
dAtA[i] = 0x12
i++
i++
i = encodeVarintContainers(dAtA, i, uint64(m.UpdateMask.Size()))
i = encodeVarintContainers(dAtA, i, uint64(m.UpdateMask.Size()))
n8, err := m.UpdateMask.MarshalTo(dAtA[i:])
n10, err := m.UpdateMask.MarshalTo(dAtA[i:])
if err != nil {
if err != nil {
return 0, err
return 0, err
}
}
i += n8
i += n10
}
}
return i, nil
return i, nil
}
}
@ -694,11 +773,11 @@ func (m *UpdateContainerResponse) MarshalTo(dAtA []byte) (int, error) {
dAtA[i] = 0xa
dAtA[i] = 0xa
i++
i++
i = encodeVarintContainers(dAtA, i, uint64(m.Container.Size()))
i = encodeVarintContainers(dAtA, i, uint64(m.Container.Size()))
n9, err := m.Container.MarshalTo(dAtA[i:])
n11, err := m.Container.MarshalTo(dAtA[i:])
if err != nil {
if err != nil {
return 0, err
return 0, err
}
}
i += n9
i += n11
return i, nil
return i, nil
}
}
@ -772,14 +851,18 @@ func (m *Container) Size() (n int) {
if l > 0 {
if l > 0 {
n += 1 + l + sovContainers(uint64(l))
n += 1 + l + sovContainers(uint64(l))
}
}
l = len(m.Runtime)
if m.Runtime != nil {
if l > 0 {
l = m.Runtime.Size()
n += 1 + l + sovContainers(uint64(l))
n += 1 + l + sovContainers(uint64(l))
}
}
if m.Spec != nil {
if m.Spec != nil {
l = m.Spec.Size()
l = m.Spec.Size()
n += 1 + l + sovContainers(uint64(l))
n += 1 + l + sovContainers(uint64(l))
}
}
l = len(m.Snapshotter)
if l > 0 {
n += 1 + l + sovContainers(uint64(l))
}
l = len(m.RootFS)
l = len(m.RootFS)
if l > 0 {
if l > 0 {
n += 1 + l + sovContainers(uint64(l))
n += 1 + l + sovContainers(uint64(l))
@ -791,6 +874,20 @@ func (m *Container) Size() (n int) {
return n
return n
}
}

func (m *Container_Runtime) Size() (n int) {
var l int
_ = l
l = len(m.Name)
if l > 0 {
n += 1 + l + sovContainers(uint64(l))
}
if m.Options != nil {
l = m.Options.Size()
n += 1 + l + sovContainers(uint64(l))
}
return n
}

func (m *GetContainerRequest) Size() (n int) {
func (m *GetContainerRequest) Size() (n int) {
var l int
var l int
_ = l
_ = l
@ -812,10 +909,12 @@ func (m *GetContainerResponse) Size() (n int) {
func (m *ListContainersRequest) Size() (n int) {
func (m *ListContainersRequest) Size() (n int) {
var l int
var l int
_ = l
_ = l
l = len(m.Filter)
if len(m.Filters) > 0 {
if l > 0 {
for _, s := range m.Filters {
l = len(s)
n += 1 + l + sovContainers(uint64(l))
n += 1 + l + sovContainers(uint64(l))
}
}
}
return n
return n
}
}
@ -908,8 +1007,9 @@ func (this *Container) String() string {
`ID:` + fmt.Sprintf("%v", this.ID) + `,`,
`ID:` + fmt.Sprintf("%v", this.ID) + `,`,
`Labels:` + mapStringForLabels + `,`,
`Labels:` + mapStringForLabels + `,`,
`Image:` + fmt.Sprintf("%v", this.Image) + `,`,
`Image:` + fmt.Sprintf("%v", this.Image) + `,`,
`Runtime:` + fmt.Sprintf("%v", this.Runtime) + `,`,
`Runtime:` + strings.Replace(fmt.Sprintf("%v", this.Runtime), "Container_Runtime", "Container_Runtime", 1) + `,`,
`Spec:` + strings.Replace(fmt.Sprintf("%v", this.Spec), "Any", "google_protobuf1.Any", 1) + `,`,
`Spec:` + strings.Replace(fmt.Sprintf("%v", this.Spec), "Any", "google_protobuf1.Any", 1) + `,`,
`Snapshotter:` + fmt.Sprintf("%v", this.Snapshotter) + `,`,
`RootFS:` + fmt.Sprintf("%v", this.RootFS) + `,`,
`RootFS:` + fmt.Sprintf("%v", this.RootFS) + `,`,
`CreatedAt:` + strings.Replace(strings.Replace(this.CreatedAt.String(), "Timestamp", "google_protobuf4.Timestamp", 1), `&`, ``, 1) + `,`,
`CreatedAt:` + strings.Replace(strings.Replace(this.CreatedAt.String(), "Timestamp", "google_protobuf4.Timestamp", 1), `&`, ``, 1) + `,`,
`UpdatedAt:` + strings.Replace(strings.Replace(this.UpdatedAt.String(), "Timestamp", "google_protobuf4.Timestamp", 1), `&`, ``, 1) + `,`,
`UpdatedAt:` + strings.Replace(strings.Replace(this.UpdatedAt.String(), "Timestamp", "google_protobuf4.Timestamp", 1), `&`, ``, 1) + `,`,
@ -917,6 +1017,17 @@ func (this *Container) String() string {
}, "")
}, "")
return s
return s
}
}
func (this *Container_Runtime) String() string {
if this == nil {
return "nil"
}
s := strings.Join([]string{`&Container_Runtime{`,
`Name:` + fmt.Sprintf("%v", this.Name) + `,`,
`Options:` + strings.Replace(fmt.Sprintf("%v", this.Options), "Any", "google_protobuf1.Any", 1) + `,`,
`}`,
}, "")
return s
}
func (this *GetContainerRequest) String() string {
func (this *GetContainerRequest) String() string {
if this == nil {
if this == nil {
return "nil"
return "nil"
@ -942,7 +1053,7 @@ func (this *ListContainersRequest) String() string {
return "nil"
return "nil"
}
}
s := strings.Join([]string{`&ListContainersRequest{`,
s := strings.Join([]string{`&ListContainersRequest{`,
`Filter:` + fmt.Sprintf("%v", this.Filter) + `,`,
`Filters:` + fmt.Sprintf("%v", this.Filters) + `,`,
`}`,
`}`,
}, "")
}, "")
return s
return s
@ -1223,7 +1334,7 @@ func (m *Container) Unmarshal(dAtA []byte) error {
|
|||||||
if wireType != 2 {
|
if wireType != 2 {
|
||||||
return fmt.Errorf("proto: wrong wireType = %d for field Runtime", wireType)
|
return fmt.Errorf("proto: wrong wireType = %d for field Runtime", wireType)
|
||||||
}
|
}
|
||||||
var stringLen uint64
|
var msglen int
|
||||||
for shift := uint(0); ; shift += 7 {
|
for shift := uint(0); ; shift += 7 {
|
||||||
if shift >= 64 {
|
if shift >= 64 {
|
||||||
return ErrIntOverflowContainers
|
return ErrIntOverflowContainers
|
||||||
@ -1233,22 +1344,26 @@ func (m *Container) Unmarshal(dAtA []byte) error {
|
|||||||
}
|
}
|
||||||
b := dAtA[iNdEx]
|
b := dAtA[iNdEx]
|
||||||
iNdEx++
|
iNdEx++
|
||||||
stringLen |= (uint64(b) & 0x7F) << shift
|
msglen |= (int(b) & 0x7F) << shift
|
||||||
if b < 0x80 {
|
if b < 0x80 {
|
||||||
break
|
break
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
intStringLen := int(stringLen)
|
if msglen < 0 {
|
||||||
if intStringLen < 0 {
|
|
||||||
return ErrInvalidLengthContainers
|
return ErrInvalidLengthContainers
|
||||||
}
|
}
|
||||||
postIndex := iNdEx + intStringLen
|
postIndex := iNdEx + msglen
|
||||||
if postIndex > l {
|
if postIndex > l {
|
||||||
return io.ErrUnexpectedEOF
|
return io.ErrUnexpectedEOF
|
||||||
}
|
}
|
||||||
m.Runtime = string(dAtA[iNdEx:postIndex])
|
if m.Runtime == nil {
|
||||||
|
m.Runtime = &Container_Runtime{}
|
||||||
|
}
|
||||||
|
if err := m.Runtime.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
iNdEx = postIndex
|
iNdEx = postIndex
|
||||||
case 6:
|
case 5:
|
||||||
if wireType != 2 {
|
if wireType != 2 {
|
||||||
return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType)
|
return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType)
|
||||||
}
|
}
|
||||||
@ -1281,6 +1396,35 @@ func (m *Container) Unmarshal(dAtA []byte) error {
|
|||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
iNdEx = postIndex
|
iNdEx = postIndex
|
||||||
|
case 6:
|
||||||
|
if wireType != 2 {
|
||||||
|
return fmt.Errorf("proto: wrong wireType = %d for field Snapshotter", wireType)
|
||||||
|
}
|
||||||
|
var stringLen uint64
|
||||||
|
for shift := uint(0); ; shift += 7 {
|
||||||
|
if shift >= 64 {
|
||||||
|
return ErrIntOverflowContainers
|
||||||
|
}
|
||||||
|
if iNdEx >= l {
|
||||||
|
return io.ErrUnexpectedEOF
|
||||||
|
}
|
||||||
|
b := dAtA[iNdEx]
|
||||||
|
iNdEx++
|
||||||
|
stringLen |= (uint64(b) & 0x7F) << shift
|
||||||
|
if b < 0x80 {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
intStringLen := int(stringLen)
|
||||||
|
if intStringLen < 0 {
|
||||||
|
return ErrInvalidLengthContainers
|
||||||
|
}
|
||||||
|
postIndex := iNdEx + intStringLen
|
||||||
|
if postIndex > l {
|
||||||
|
return io.ErrUnexpectedEOF
|
||||||
|
}
|
||||||
|
m.Snapshotter = string(dAtA[iNdEx:postIndex])
|
||||||
|
iNdEx = postIndex
|
||||||
case 7:
|
case 7:
|
||||||
if wireType != 2 {
|
if wireType != 2 {
|
||||||
return fmt.Errorf("proto: wrong wireType = %d for field RootFS", wireType)
|
return fmt.Errorf("proto: wrong wireType = %d for field RootFS", wireType)
|
||||||
@ -1391,6 +1535,118 @@ func (m *Container) Unmarshal(dAtA []byte) error {
|
|||||||
}
|
}
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
func (m *Container_Runtime) Unmarshal(dAtA []byte) error {
|
||||||
|
l := len(dAtA)
|
||||||
|
iNdEx := 0
|
||||||
|
for iNdEx < l {
|
||||||
|
preIndex := iNdEx
|
||||||
|
var wire uint64
|
||||||
|
for shift := uint(0); ; shift += 7 {
|
||||||
|
if shift >= 64 {
|
||||||
|
return ErrIntOverflowContainers
|
||||||
|
}
|
||||||
|
if iNdEx >= l {
|
||||||
|
return io.ErrUnexpectedEOF
|
||||||
|
}
|
||||||
|
b := dAtA[iNdEx]
|
||||||
|
iNdEx++
|
||||||
|
wire |= (uint64(b) & 0x7F) << shift
|
||||||
|
if b < 0x80 {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
fieldNum := int32(wire >> 3)
|
||||||
|
wireType := int(wire & 0x7)
|
||||||
|
if wireType == 4 {
|
||||||
|
return fmt.Errorf("proto: Runtime: wiretype end group for non-group")
|
||||||
|
}
|
||||||
|
if fieldNum <= 0 {
|
||||||
|
return fmt.Errorf("proto: Runtime: illegal tag %d (wire type %d)", fieldNum, wire)
|
||||||
|
}
|
||||||
|
switch fieldNum {
|
||||||
|
case 1:
|
||||||
|
if wireType != 2 {
|
||||||
|
return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType)
|
||||||
|
}
|
||||||
|
var stringLen uint64
|
||||||
|
for shift := uint(0); ; shift += 7 {
|
||||||
|
if shift >= 64 {
|
||||||
|
return ErrIntOverflowContainers
|
||||||
|
}
|
||||||
|
if iNdEx >= l {
|
||||||
|
return io.ErrUnexpectedEOF
|
||||||
|
}
|
||||||
|
b := dAtA[iNdEx]
|
||||||
|
iNdEx++
|
||||||
|
stringLen |= (uint64(b) & 0x7F) << shift
|
||||||
|
if b < 0x80 {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
intStringLen := int(stringLen)
|
||||||
|
if intStringLen < 0 {
|
||||||
|
return ErrInvalidLengthContainers
|
||||||
|
}
|
||||||
|
postIndex := iNdEx + intStringLen
|
||||||
|
if postIndex > l {
|
||||||
|
return io.ErrUnexpectedEOF
|
||||||
|
}
|
||||||
|
m.Name = string(dAtA[iNdEx:postIndex])
|
||||||
|
iNdEx = postIndex
|
||||||
|
case 2:
|
||||||
|
if wireType != 2 {
|
||||||
|
return fmt.Errorf("proto: wrong wireType = %d for field Options", wireType)
|
||||||
|
}
|
||||||
|
var msglen int
|
||||||
|
for shift := uint(0); ; shift += 7 {
|
||||||
|
if shift >= 64 {
|
||||||
|
return ErrIntOverflowContainers
|
||||||
|
}
|
||||||
|
if iNdEx >= l {
|
||||||
|
return io.ErrUnexpectedEOF
|
||||||
|
}
|
||||||
|
b := dAtA[iNdEx]
|
||||||
|
iNdEx++
|
||||||
|
msglen |= (int(b) & 0x7F) << shift
|
||||||
|
if b < 0x80 {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if msglen < 0 {
|
||||||
|
return ErrInvalidLengthContainers
|
||||||
|
}
|
||||||
|
postIndex := iNdEx + msglen
|
||||||
|
if postIndex > l {
|
||||||
|
return io.ErrUnexpectedEOF
|
||||||
|
}
|
||||||
|
if m.Options == nil {
|
||||||
|
m.Options = &google_protobuf1.Any{}
|
||||||
|
}
|
||||||
|
if err := m.Options.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
iNdEx = postIndex
|
||||||
|
default:
|
||||||
|
iNdEx = preIndex
|
||||||
|
skippy, err := skipContainers(dAtA[iNdEx:])
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
if skippy < 0 {
|
||||||
|
return ErrInvalidLengthContainers
|
||||||
|
}
|
||||||
|
if (iNdEx + skippy) > l {
|
||||||
|
return io.ErrUnexpectedEOF
|
||||||
|
}
|
||||||
|
iNdEx += skippy
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if iNdEx > l {
|
||||||
|
return io.ErrUnexpectedEOF
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
func (m *GetContainerRequest) Unmarshal(dAtA []byte) error {
|
func (m *GetContainerRequest) Unmarshal(dAtA []byte) error {
|
||||||
l := len(dAtA)
|
l := len(dAtA)
|
||||||
iNdEx := 0
|
iNdEx := 0
|
||||||
@ -1581,7 +1837,7 @@ func (m *ListContainersRequest) Unmarshal(dAtA []byte) error {
|
|||||||
switch fieldNum {
|
switch fieldNum {
|
||||||
case 1:
|
case 1:
|
||||||
if wireType != 2 {
|
if wireType != 2 {
|
||||||
return fmt.Errorf("proto: wrong wireType = %d for field Filter", wireType)
|
return fmt.Errorf("proto: wrong wireType = %d for field Filters", wireType)
|
||||||
}
|
}
|
||||||
var stringLen uint64
|
var stringLen uint64
|
||||||
for shift := uint(0); ; shift += 7 {
|
for shift := uint(0); ; shift += 7 {
|
||||||
@ -1606,7 +1862,7 @@ func (m *ListContainersRequest) Unmarshal(dAtA []byte) error {
|
|||||||
if postIndex > l {
|
if postIndex > l {
|
||||||
return io.ErrUnexpectedEOF
|
return io.ErrUnexpectedEOF
|
||||||
}
|
}
|
||||||
m.Filter = string(dAtA[iNdEx:postIndex])
|
m.Filters = append(m.Filters, string(dAtA[iNdEx:postIndex]))
|
||||||
iNdEx = postIndex
|
iNdEx = postIndex
|
||||||
default:
|
default:
|
||||||
iNdEx = preIndex
|
iNdEx = preIndex
|
||||||
@ -2248,52 +2504,57 @@ var (
)
)

func init() {
func init() {
proto.RegisterFile("github.com/containerd/containerd/api/services/containers/containers.proto", fileDescriptorContainers)
proto.RegisterFile("github.com/containerd/containerd/api/services/containers/v1/containers.proto", fileDescriptorContainers)
}
}

var fileDescriptorContainers = []byte{
var fileDescriptorContainers = []byte{
// 680 bytes of a gzipped FileDescriptorProto
// 757 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xac, 0x55, 0xcd, 0x6e, 0xd3, 0x40,
|
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xb4, 0x56, 0xcb, 0x72, 0xd3, 0x4a,
|
||||||
0x10, 0xae, 0x93, 0xe0, 0x36, 0x13, 0x21, 0xa1, 0x25, 0x04, 0x63, 0xa4, 0x24, 0x32, 0x3f, 0xca,
|
0x10, 0x8d, 0x6c, 0x47, 0x8e, 0xdb, 0x9b, 0x5b, 0x73, 0x7d, 0x7d, 0x85, 0xa8, 0xb2, 0x8d, 0x57,
|
||||||
0x05, 0x1b, 0xc2, 0x85, 0x9f, 0x0a, 0xa9, 0xe9, 0x9f, 0x2a, 0x15, 0x0e, 0x6e, 0xa1, 0xdc, 0x2a,
|
0x5e, 0x80, 0x4c, 0x0c, 0x05, 0x79, 0xac, 0xe2, 0xbc, 0x8a, 0xaa, 0x84, 0x4a, 0x0d, 0xb0, 0x81,
|
||||||
0x27, 0xde, 0x04, 0xab, 0x8e, 0xd7, 0x78, 0x37, 0x95, 0x72, 0xe3, 0x11, 0x90, 0x78, 0x05, 0x8e,
|
0x45, 0x90, 0xed, 0xb1, 0x23, 0x2c, 0x69, 0x84, 0x66, 0xec, 0x2a, 0x17, 0x0b, 0xf8, 0x04, 0xfe,
|
||||||
0x3c, 0x48, 0x8f, 0x1c, 0x39, 0x15, 0x9a, 0x27, 0x41, 0xbb, 0x5e, 0xd7, 0xa9, 0xed, 0x50, 0x10,
|
0x82, 0x5f, 0xc9, 0x92, 0x25, 0xab, 0x3c, 0xfc, 0x25, 0x94, 0x46, 0xa3, 0xc8, 0xf8, 0x51, 0xc8,
|
||||||
0xbd, 0xcd, 0x78, 0xbe, 0xf9, 0x32, 0xf3, 0xcd, 0x67, 0x07, 0x76, 0x46, 0x1e, 0xfb, 0x30, 0xe9,
|
0x81, 0xec, 0xa6, 0x3d, 0x7d, 0xba, 0x8f, 0x4e, 0x9f, 0x96, 0x05, 0x47, 0x3d, 0x8b, 0x9f, 0x0d,
|
||||||
0x9b, 0x03, 0x32, 0xb6, 0x06, 0x24, 0x60, 0x8e, 0x17, 0xe0, 0xc8, 0x9d, 0x0f, 0x9d, 0xd0, 0xb3,
|
0x5a, 0x46, 0x9b, 0x3a, 0xf5, 0x36, 0x75, 0xb9, 0x69, 0xb9, 0xc4, 0xef, 0x4c, 0x1e, 0x4d, 0xcf,
|
||||||
0x28, 0x8e, 0x8e, 0xbd, 0x01, 0xa6, 0xe9, 0xf3, 0xf9, 0xd0, 0x0c, 0x23, 0xc2, 0x08, 0xba, 0x9e,
|
0xaa, 0x33, 0xe2, 0x0f, 0xad, 0x36, 0x61, 0xf1, 0xef, 0xac, 0x3e, 0x5c, 0x9f, 0x88, 0x0c, 0xcf,
|
||||||
0x36, 0x99, 0xc7, 0x4f, 0xf4, 0xfa, 0x88, 0x8c, 0x88, 0xa8, 0x58, 0x3c, 0x8a, 0x41, 0xfa, 0x9d,
|
0xa7, 0x9c, 0xa2, 0x07, 0x31, 0xce, 0x88, 0x30, 0xc6, 0x44, 0xd6, 0x70, 0x5d, 0x2f, 0xf4, 0x68,
|
||||||
0x11, 0x21, 0x23, 0x1f, 0x5b, 0x22, 0xeb, 0x4f, 0x86, 0x96, 0x13, 0x4c, 0x65, 0xe9, 0x6e, 0xb6,
|
0x8f, 0x8a, 0xec, 0x7a, 0x70, 0x0a, 0x81, 0xfa, 0xbd, 0x1e, 0xa5, 0x3d, 0x9b, 0xd4, 0x45, 0xd4,
|
||||||
0x84, 0xc7, 0x21, 0x4b, 0x8a, 0xed, 0x6c, 0x71, 0xe8, 0x61, 0xdf, 0x3d, 0x1c, 0x3b, 0xf4, 0x48,
|
0x1a, 0x74, 0xeb, 0xa6, 0x3b, 0x92, 0x57, 0xf7, 0xa7, 0xaf, 0x88, 0xe3, 0xf1, 0xe8, 0xb2, 0x32,
|
||||||
0x22, 0x5a, 0x59, 0x04, 0xf3, 0xc6, 0x98, 0x32, 0x67, 0x1c, 0x4a, 0xc0, 0xd6, 0x5f, 0xad, 0xca,
|
0x7d, 0xd9, 0xb5, 0x88, 0xdd, 0x39, 0x75, 0x4c, 0xd6, 0x97, 0x19, 0xe5, 0xe9, 0x0c, 0x6e, 0x39,
|
||||||
0xa6, 0x21, 0xa6, 0x96, 0x8b, 0xe9, 0x20, 0xf2, 0x42, 0x46, 0xa2, 0xb9, 0x30, 0xe6, 0x31, 0xbe,
|
0x84, 0x71, 0xd3, 0xf1, 0x64, 0xc2, 0x76, 0x22, 0x05, 0xf8, 0xc8, 0x23, 0xac, 0xde, 0x21, 0xac,
|
||||||
0x96, 0xa1, 0xba, 0x9e, 0x34, 0xa1, 0x06, 0x94, 0x3c, 0x57, 0x53, 0xda, 0x4a, 0xa7, 0xda, 0x53,
|
0xed, 0x5b, 0x1e, 0xa7, 0x7e, 0x08, 0xae, 0x5e, 0x66, 0x20, 0xb7, 0x1b, 0x65, 0xa2, 0x22, 0xa4,
|
||||||
0x67, 0xa7, 0xad, 0xd2, 0xce, 0x86, 0x5d, 0xf2, 0x5c, 0xb4, 0x0a, 0xaa, 0xef, 0xf4, 0xb1, 0x4f,
|
0xac, 0x8e, 0xa6, 0x54, 0x94, 0x5a, 0xae, 0xa9, 0x8e, 0x2f, 0xca, 0xa9, 0x17, 0x7b, 0x38, 0x65,
|
||||||
0xb5, 0x52, 0xbb, 0xdc, 0xa9, 0x75, 0xef, 0x9b, 0x17, 0xe4, 0x31, 0xcf, 0x19, 0xcc, 0x5d, 0x01,
|
0x75, 0xd0, 0x09, 0xa8, 0xb6, 0xd9, 0x22, 0x36, 0xd3, 0x52, 0x95, 0x74, 0x2d, 0xdf, 0xd8, 0x30,
|
||||||
0xdb, 0x0c, 0x58, 0x34, 0xb5, 0x65, 0x0f, 0xaa, 0xc3, 0x35, 0x6f, 0xec, 0x8c, 0xb0, 0x56, 0xe6,
|
0x7e, 0xab, 0x93, 0x71, 0x53, 0xd5, 0x38, 0x12, 0xd0, 0x7d, 0x97, 0xfb, 0x23, 0x2c, 0xeb, 0xa0,
|
||||||
0xc4, 0x76, 0x9c, 0x20, 0x0d, 0x96, 0xa3, 0x49, 0xc0, 0xf7, 0xd2, 0x2a, 0xe2, 0x79, 0x92, 0xa2,
|
0x02, 0xac, 0x5a, 0x8e, 0xd9, 0x23, 0x5a, 0x3a, 0x68, 0x86, 0xc3, 0x00, 0xbd, 0x84, 0xac, 0x3f,
|
||||||
0x0e, 0x54, 0x68, 0x88, 0x07, 0x9a, 0xda, 0x56, 0x3a, 0xb5, 0x6e, 0xdd, 0x8c, 0xb5, 0x30, 0x13,
|
0x70, 0x83, 0x07, 0xd4, 0x32, 0x15, 0xa5, 0x96, 0x6f, 0x3c, 0x5d, 0xaa, 0x11, 0x0e, 0xb1, 0x38,
|
||||||
0x2d, 0xcc, 0xb5, 0x60, 0x6a, 0x0b, 0x04, 0x32, 0x40, 0x8d, 0x08, 0x61, 0x43, 0xaa, 0x2d, 0x8b,
|
0x2a, 0x82, 0x6a, 0x90, 0x61, 0x1e, 0x69, 0x6b, 0xab, 0xa2, 0x58, 0xc1, 0x08, 0xa5, 0x34, 0x22,
|
||||||
0x99, 0x61, 0x76, 0xda, 0x52, 0x6d, 0x42, 0xd8, 0xd6, 0x9e, 0x2d, 0x2b, 0x68, 0x1d, 0x60, 0x10,
|
0x29, 0x8d, 0x1d, 0x77, 0x84, 0x45, 0x06, 0xaa, 0x40, 0x9e, 0xb9, 0xa6, 0xc7, 0xce, 0x28, 0xe7,
|
||||||
0x61, 0x87, 0x61, 0xf7, 0xd0, 0x61, 0xda, 0x8a, 0xe0, 0xd4, 0x73, 0x9c, 0xfb, 0x89, 0xbe, 0xbd,
|
0xc4, 0xd7, 0x54, 0xc1, 0x6a, 0xf2, 0x27, 0x54, 0x05, 0xd5, 0xa7, 0x94, 0x77, 0x99, 0x96, 0x15,
|
||||||
0x95, 0x93, 0xd3, 0xd6, 0xd2, 0xe7, 0x9f, 0x2d, 0xc5, 0xae, 0xca, 0xbe, 0x35, 0xc6, 0x49, 0x26,
|
0xfa, 0xc0, 0xf8, 0xa2, 0xac, 0x62, 0x4a, 0xf9, 0xc1, 0x2b, 0x2c, 0x6f, 0xd0, 0x2e, 0x40, 0xdb,
|
||||||
0xa1, 0x9b, 0x90, 0x54, 0xff, 0x85, 0x44, 0xf6, 0xad, 0x31, 0xfd, 0x39, 0xd4, 0xe6, 0xe4, 0x41,
|
0x27, 0x26, 0x27, 0x9d, 0x53, 0x93, 0x6b, 0x6b, 0xa2, 0xab, 0x3e, 0xd3, 0xf5, 0x75, 0x34, 0xc0,
|
||||||
0x37, 0xa0, 0x7c, 0x84, 0xa7, 0xb1, 0xda, 0x36, 0x0f, 0xb9, 0x50, 0xc7, 0x8e, 0x3f, 0xc1, 0x5a,
|
0xe6, 0xda, 0xf9, 0x45, 0x79, 0xe5, 0xeb, 0x65, 0x59, 0xc1, 0x39, 0x89, 0xdb, 0xe1, 0x41, 0x91,
|
||||||
0x29, 0x16, 0x4a, 0x24, 0x2f, 0x4a, 0xcf, 0x14, 0xe3, 0x11, 0xdc, 0xdc, 0xc6, 0xec, 0x5c, 0x66,
|
0x81, 0xd7, 0x89, 0x8a, 0xe4, 0x96, 0x29, 0x22, 0x71, 0x3b, 0x5c, 0xdf, 0x84, 0xfc, 0x84, 0xec,
|
||||||
0x1b, 0x7f, 0x9c, 0x60, 0xca, 0x16, 0xdd, 0xcb, 0xd8, 0x87, 0xfa, 0x45, 0x38, 0x0d, 0x49, 0x40,
|
0xe8, 0x1f, 0x48, 0xf7, 0xc9, 0x28, 0x9c, 0x2c, 0x0e, 0x8e, 0xc1, 0x00, 0x86, 0xa6, 0x3d, 0x20,
|
||||||
0x31, 0x5a, 0x85, 0xea, 0xf9, 0xe1, 0x44, 0x5b, 0xad, 0xab, 0x2d, 0x3a, 0x65, 0xaf, 0xc2, 0x77,
|
0x5a, 0x2a, 0x1c, 0x80, 0x08, 0xb6, 0x52, 0x1b, 0x8a, 0x7e, 0x0c, 0x59, 0x29, 0x24, 0x42, 0x90,
|
||||||
0xb0, 0xd3, 0x06, 0xc3, 0x82, 0x5b, 0xbb, 0x1e, 0x4d, 0x69, 0x69, 0x3a, 0x86, 0x3a, 0xf4, 0x7c,
|
0x71, 0x4d, 0x87, 0x48, 0x9c, 0x38, 0x23, 0x03, 0xb2, 0xd4, 0xe3, 0x16, 0x75, 0x99, 0x80, 0x2e,
|
||||||
0x26, 0x39, 0xab, 0xb6, 0xcc, 0x8c, 0xf7, 0xd0, 0xc8, 0x36, 0xc8, 0x41, 0x5e, 0x01, 0xa4, 0xaf,
|
0x92, 0x35, 0x4a, 0xaa, 0x3e, 0x82, 0x7f, 0x0f, 0x09, 0xbf, 0x19, 0x12, 0x26, 0x1f, 0x07, 0x84,
|
||||||
0x9c, 0xa6, 0x08, 0x53, 0x5d, 0x36, 0xc9, 0x5c, 0x87, 0xf1, 0x0e, 0x1a, 0xeb, 0xe2, 0x38, 0x39,
|
0xf1, 0x45, 0x56, 0xab, 0x9e, 0x41, 0xe1, 0xd7, 0x74, 0xe6, 0x51, 0x97, 0x11, 0x74, 0x02, 0xb9,
|
||||||
0x49, 0xfe, 0x6f, 0xc5, 0x03, 0xb8, 0x9d, 0xe3, 0xbd, 0x12, 0xed, 0xbe, 0x28, 0xd0, 0x78, 0x2b,
|
0x9b, 0xb1, 0x0b, 0x58, 0xbe, 0xf1, 0x70, 0x19, 0x73, 0x34, 0x33, 0x81, 0x4c, 0x38, 0x2e, 0x52,
|
||||||
0x9c, 0x70, 0xb5, 0x13, 0xa3, 0x97, 0x50, 0x8b, 0x1d, 0x26, 0x3e, 0x1f, 0xc2, 0x39, 0x45, 0xd6,
|
0x5d, 0x87, 0xff, 0x8e, 0x2c, 0x16, 0xb7, 0x62, 0x11, 0x35, 0x0d, 0xb2, 0x5d, 0xcb, 0xe6, 0xc4,
|
||||||
0xdc, 0xe2, 0x5f, 0x98, 0xd7, 0x0e, 0x3d, 0xb2, 0xa5, 0x91, 0x79, 0xcc, 0xd7, 0xcd, 0x0d, 0x75,
|
0x67, 0x9a, 0x52, 0x49, 0xd7, 0x72, 0x38, 0x0a, 0xab, 0x36, 0x14, 0xa7, 0x21, 0x92, 0x1e, 0x06,
|
||||||
0x25, 0xeb, 0x3e, 0x86, 0xc6, 0x06, 0xf6, 0x71, 0xc1, 0xb6, 0x0b, 0x2c, 0xdb, 0xfd, 0x56, 0x06,
|
0x88, 0x1b, 0x0b, 0xd8, 0xed, 0xf8, 0x4d, 0x54, 0xa9, 0x7e, 0x80, 0xe2, 0xae, 0x70, 0xc5, 0x8c,
|
||||||
0x48, 0x8d, 0x82, 0xde, 0x40, 0x79, 0x1b, 0x33, 0x64, 0x64, 0x7e, 0xb2, 0xe0, 0x25, 0xd0, 0xef,
|
0x78, 0x7f, 0x5f, 0x8c, 0x3e, 0xfc, 0x3f, 0xd3, 0xeb, 0xce, 0x94, 0xff, 0xa6, 0x40, 0xf1, 0x8d,
|
||||||
0xfd, 0x11, 0x23, 0xd7, 0xd9, 0x83, 0x0a, 0xb7, 0x22, 0xca, 0x7e, 0xb9, 0x0a, 0x0d, 0xad, 0x3f,
|
0xb0, 0xea, 0xdd, 0x3f, 0x19, 0xda, 0x86, 0x7c, 0xb8, 0x16, 0xe2, 0xa5, 0x2a, 0x3d, 0x3b, 0xbb,
|
||||||
0xb8, 0x04, 0x25, 0x49, 0x0f, 0x40, 0x8d, 0xdd, 0x82, 0xb2, 0x0d, 0xc5, 0xe6, 0xd4, 0x1f, 0x5e,
|
0x4f, 0x07, 0xc1, 0x7b, 0xf7, 0xd8, 0x64, 0x7d, 0x2c, 0xb7, 0x2f, 0x38, 0x07, 0xb2, 0xcc, 0x10,
|
||||||
0x06, 0x4b, 0x89, 0xe3, 0xbb, 0xe4, 0x88, 0x8b, 0x3d, 0x94, 0x23, 0x5e, 0x74, 0xd5, 0x6d, 0x50,
|
0xbd, 0x33, 0x59, 0x1e, 0x43, 0x71, 0x8f, 0xd8, 0x64, 0x8e, 0x2a, 0x0b, 0x96, 0xa5, 0x71, 0x95,
|
||||||
0xe3, 0xbb, 0xe4, 0x88, 0x8b, 0xcf, 0xa5, 0x37, 0x72, 0x4e, 0xda, 0xe4, 0x7f, 0x64, 0x3d, 0xed,
|
0x01, 0x88, 0xcd, 0x88, 0x86, 0x90, 0x3e, 0x24, 0x1c, 0x3d, 0x4b, 0x40, 0x63, 0xce, 0x4a, 0xea,
|
||||||
0xe4, 0xac, 0xb9, 0xf4, 0xe3, 0xac, 0xb9, 0xf4, 0x69, 0xd6, 0x54, 0x4e, 0x66, 0x4d, 0xe5, 0xfb,
|
0xcf, 0x97, 0xc6, 0x49, 0x29, 0x3e, 0x41, 0x26, 0x58, 0x0b, 0x94, 0xe4, 0x6f, 0x61, 0xee, 0xca,
|
||||||
0xac, 0xa9, 0xfc, 0x9a, 0x35, 0x95, 0xbe, 0x2a, 0x90, 0x4f, 0x7f, 0x07, 0x00, 0x00, 0xff, 0xff,
|
0xe9, 0x9b, 0xb7, 0x40, 0xca, 0xe6, 0x9f, 0x41, 0x0d, 0x9d, 0x8b, 0x92, 0x14, 0x99, 0xbf, 0x50,
|
||||||
0xc1, 0x18, 0xeb, 0x02, 0x8d, 0x07, 0x00, 0x00,
|
0xfa, 0xd6, 0x6d, 0xa0, 0x31, 0x81, 0xd0, 0x23, 0x89, 0x08, 0xcc, 0xf7, 0x7d, 0x22, 0x02, 0x8b,
|
||||||
|
0x9c, 0xf8, 0x0e, 0xd4, 0xd0, 0x37, 0x89, 0x08, 0xcc, 0xb7, 0x98, 0x5e, 0x9c, 0xd9, 0x88, 0xfd,
|
||||||
|
0xe0, 0x33, 0xa5, 0xf9, 0xfe, 0xfc, 0xba, 0xb4, 0xf2, 0xe3, 0xba, 0xb4, 0xf2, 0x65, 0x5c, 0x52,
|
||||||
|
0xce, 0xc7, 0x25, 0xe5, 0xfb, 0xb8, 0xa4, 0x5c, 0x8d, 0x4b, 0xca, 0xdb, 0x83, 0x3f, 0xf8, 0xf2,
|
||||||
|
0xda, 0x8e, 0xa3, 0x96, 0x2a, 0x3a, 0x3e, 0xf9, 0x19, 0x00, 0x00, 0xff, 0xff, 0xa1, 0xaf, 0xe2,
|
||||||
|
0x52, 0xca, 0x09, 0x00, 0x00,
|
||||||
}
|
}
|
@ -1,19 +1,21 @@
syntax = "proto3";
syntax = "proto3";

package containerd.v1;
package containerd.services.containers.v1;

import "gogoproto/gogo.proto";
import "gogoproto/gogo.proto";
import "google/protobuf/any.proto";
import "google/protobuf/any.proto";
import "google/protobuf/empty.proto";
import "google/protobuf/empty.proto";
import "google/protobuf/field_mask.proto";
import "google/protobuf/field_mask.proto";
import "google/protobuf/timestamp.proto";
import "google/protobuf/timestamp.proto";
import "github.com/containerd/containerd/api/types/descriptor/descriptor.proto";
import "github.com/containerd/containerd/api/types/descriptor.proto";

option go_package = "github.com/containerd/containerd/api/services/containers/v1;containers";

// Containers provides metadata storage for containers used in the execution
// Containers provides metadata storage for containers used in the execution
// service.
// service.
//
//
// The objects here provide an state-independent view of containers for use in
// The objects here provide an state-independent view of containers for use in
// management and resource pinning. From that perspective, contaienrs do not
// management and resource pinning. From that perspective, containers do not
// have a "state" but rather this is the set of resources that will be
// have a "state" but rather this is the set of resources that will be
// considered in use by the container.
// considered in use by the container.
//
//
@ -51,11 +53,20 @@ message Container {
// If this field is updated, the spec and rootfs needed to updated, as well.
// If this field is updated, the spec and rootfs needed to updated, as well.
string image = 3;
string image = 3;

message Runtime {
// Name is the name of the runtime.
string name = 1;
// Options specify additional runtime initialization options.
google.protobuf.Any options = 2;
}
// Runtime specifies which runtime to use for executing this container.
// Runtime specifies which runtime to use for executing this container.
string runtime = 4;
Runtime runtime = 4;

// Spec to be used when creating the container. This is runtime specific.
// Spec to be used when creating the container. This is runtime specific.
google.protobuf.Any spec = 6;
google.protobuf.Any spec = 5;

// Snapshotter specifies the snapshotter name used for rootfs
string snapshotter = 6;

// RootFS specifies the snapshot key to use for the container's root
// RootFS specifies the snapshot key to use for the container's root
// filesystem. When starting a task from this container, a caller should
// filesystem. When starting a task from this container, a caller should
@ -67,7 +78,10 @@ message Container {
// This field may be updated.
// This field may be updated.
string rootfs = 7 [(gogoproto.customname) = "RootFS"];
string rootfs = 7 [(gogoproto.customname) = "RootFS"];

// CreatedAt is the time the container was first created.
google.protobuf.Timestamp created_at = 8 [(gogoproto.stdtime) = true, (gogoproto.nullable) = false];
google.protobuf.Timestamp created_at = 8 [(gogoproto.stdtime) = true, (gogoproto.nullable) = false];

// UpdatedAt is the last time the container was mutated.
google.protobuf.Timestamp updated_at = 9 [(gogoproto.stdtime) = true, (gogoproto.nullable) = false];
google.protobuf.Timestamp updated_at = 9 [(gogoproto.stdtime) = true, (gogoproto.nullable) = false];
}
}

@ -80,7 +94,17 @@ message GetContainerResponse {
}
}

message ListContainersRequest {
message ListContainersRequest {
string filter = 1; // TODO(stevvooe): Define a filtering syntax to make these queries.
// Filters contains one or more filters using the syntax defined in the
// containerd filter package.
//
// The returned result will be those that match any of the provided
// filters. Expanded, containers that match the following will be
// returned:
//
// filters[0] or filters[1] or ... or filters[n-1] or filters[n]
//
// If filters is zero-length or nil, all items will be returned.
repeated string filters = 1;
}
}

message ListContainersResponse {
message ListContainersResponse {
File diff suppressed because it is too large
@ -1,11 +1,14 @@
syntax = "proto3";
syntax = "proto3";

package containerd.v1;
package containerd.services.content.v1;

import "gogoproto/gogo.proto";
import "gogoproto/gogo.proto";
import "google/protobuf/field_mask.proto";
import "google/protobuf/timestamp.proto";
import "google/protobuf/timestamp.proto";
import "google/protobuf/empty.proto";
import "google/protobuf/empty.proto";

option go_package = "github.com/containerd/containerd/api/services/content/v1;content";

// Content provides access to a content addressable storage system.
// Content provides access to a content addressable storage system.
service Content {
service Content {
// Info returns information about a committed object.
// Info returns information about a committed object.
@ -14,6 +17,13 @@ service Content {
// existence.
// existence.
rpc Info(InfoRequest) returns (InfoResponse);
rpc Info(InfoRequest) returns (InfoResponse);

// Update updates content metadata.
//
// This call can be used to manage the mutable content labels. The
// immutable metadata such as digest, size, and committed at cannot
// be updated.
rpc Update(UpdateRequest) returns (UpdateResponse);

// List streams the entire set of content as Info objects and closes the
// List streams the entire set of content as Info objects and closes the
// stream.
// stream.
//
//
@ -28,15 +38,18 @@ service Content {
// Read allows one to read an object based on the offset into the content.
// Read allows one to read an object based on the offset into the content.
//
//
// The requested data may be returned in one or more messages.
// The requested data may be returned in one or more messages.
rpc Read(ReadRequest) returns (stream ReadResponse);
rpc Read(ReadContentRequest) returns (stream ReadContentResponse);

// Status returns the status of ongoing object ingestions, started via
// Status returns the status for a single reference.
rpc Status(StatusRequest) returns (StatusResponse);

// ListStatuses returns the status of ongoing object ingestions, started via
// Write.
// Write.
//
//
// Only those matching the regular expression will be provided in the
// Only those matching the regular expression will be provided in the
// response. If the provided regular expression is empty, all ingestions
// response. If the provided regular expression is empty, all ingestions
// will be provided.
// will be provided.
rpc Status(StatusRequest) returns (StatusResponse);
rpc ListStatuses(ListStatusesRequest) returns (ListStatusesResponse);

// Write begins or resumes writes to a resource identified by a unique ref.
// Write begins or resumes writes to a resource identified by a unique ref.
// Only one active stream may exist at a time for each ref.
// Only one active stream may exist at a time for each ref.
@ -54,7 +67,7 @@ service Content {
|
|||||||
//
|
//
|
||||||
// When completed, the commit flag should be set to true. If expected size
|
// When completed, the commit flag should be set to true. If expected size
|
||||||
// or digest is set, the content will be validated against those values.
|
// or digest is set, the content will be validated against those values.
|
||||||
rpc Write(stream WriteRequest) returns (stream WriteResponse);
|
rpc Write(stream WriteContentRequest) returns (stream WriteContentResponse);
|
||||||
|
|
||||||
// Abort cancels the ongoing write named in the request. Any resources
|
// Abort cancels the ongoing write named in the request. Any resources
|
||||||
// associated with the write will be collected.
|
// associated with the write will be collected.
|
||||||
@ -68,8 +81,14 @@ message Info {
|
|||||||
// Size is the total number of bytes in the blob.
|
// Size is the total number of bytes in the blob.
|
||||||
int64 size = 2;
|
int64 size = 2;
|
||||||
|
|
||||||
// CommittedAt provides the time at which the blob was committed.
|
// CreatedAt provides the time at which the blob was committed.
|
||||||
google.protobuf.Timestamp committed_at = 3 [(gogoproto.stdtime) = true, (gogoproto.nullable) = false];
|
google.protobuf.Timestamp created_at = 3 [(gogoproto.stdtime) = true, (gogoproto.nullable) = false];
|
||||||
|
|
||||||
|
// UpdatedAt provides the time the info was last updated.
|
||||||
|
google.protobuf.Timestamp updated_at = 4 [(gogoproto.stdtime) = true, (gogoproto.nullable) = false];
|
||||||
|
|
||||||
|
// Labels are arbitrary data on content.
|
||||||
|
map<string, string> labels = 5;
|
||||||
}
|
}
|
||||||
|
|
||||||
message InfoRequest {
|
message InfoRequest {
|
||||||
@ -80,7 +99,35 @@ message InfoResponse {
|
|||||||
Info info = 1 [(gogoproto.nullable) = false];
|
Info info = 1 [(gogoproto.nullable) = false];
|
||||||
}
|
}
|
||||||
|
|
||||||
message ListContentRequest {}
|
message UpdateRequest {
|
||||||
|
Info info = 1 [(gogoproto.nullable) = false];
|
||||||
|
|
||||||
|
// UpdateMask specifies which fields to perform the update on. If empty,
|
||||||
|
// the operation applies to all fields.
|
||||||
|
//
|
||||||
|
// In info, Digest, Size, and CreatedAt are immutable,
|
||||||
|
// other field may be updated using this mask.
|
||||||
|
// If no mask is provided, all mutable field are updated.
|
||||||
|
google.protobuf.FieldMask update_mask = 2;
|
||||||
|
}
|
||||||
|
|
||||||
|
message UpdateResponse {
|
||||||
|
Info info = 1 [(gogoproto.nullable) = false];
|
||||||
|
}
|
||||||
|
|
||||||
|
message ListContentRequest {
|
||||||
|
// Filters contains one or more filters using the syntax defined in the
|
||||||
|
// containerd filter package.
|
||||||
|
//
|
||||||
|
// The returned result will be those that match any of the provided
|
||||||
|
// filters. Expanded, containers that match the following will be
|
||||||
|
// returned:
|
||||||
|
//
|
||||||
|
// filters[0] or filters[1] or ... or filters[n-1] or filters[n]
|
||||||
|
//
|
||||||
|
// If filters is zero-length or nil, all items will be returned.
|
||||||
|
repeated string filters = 1;
|
||||||
|
}
|
||||||
|
|
||||||
message ListContentResponse {
|
message ListContentResponse {
|
||||||
repeated Info info = 1 [(gogoproto.nullable) = false];
|
repeated Info info = 1 [(gogoproto.nullable) = false];
|
||||||
@ -91,9 +138,9 @@ message DeleteContentRequest {
|
|||||||
string digest = 1 [(gogoproto.customtype) = "github.com/opencontainers/go-digest.Digest", (gogoproto.nullable) = false];
|
string digest = 1 [(gogoproto.customtype) = "github.com/opencontainers/go-digest.Digest", (gogoproto.nullable) = false];
|
||||||
}
|
}
|
||||||
|
|
||||||
// ReadRequest defines the fields that make up a request to read a portion of
|
// ReadContentRequest defines the fields that make up a request to read a portion of
|
||||||
// data from a stored object.
|
// data from a stored object.
|
||||||
message ReadRequest {
|
message ReadContentRequest {
|
||||||
// Digest is the hash identity to read.
|
// Digest is the hash identity to read.
|
||||||
string digest = 1 [(gogoproto.customtype) = "github.com/opencontainers/go-digest.Digest", (gogoproto.nullable) = false];
|
string digest = 1 [(gogoproto.customtype) = "github.com/opencontainers/go-digest.Digest", (gogoproto.nullable) = false];
|
||||||
|
|
||||||
@ -107,16 +154,12 @@ message ReadRequest {
|
|||||||
int64 size = 3;
|
int64 size = 3;
|
||||||
}
|
}
|
||||||
|
|
||||||
// ReadResponse carries byte data for a read request.
|
// ReadContentResponse carries byte data for a read request.
|
||||||
message ReadResponse {
|
message ReadContentResponse {
|
||||||
int64 offset = 1; // offset of the returned data
|
int64 offset = 1; // offset of the returned data
|
||||||
bytes data = 2; // actual data
|
bytes data = 2; // actual data
|
||||||
}
|
}
|
||||||
|
|
||||||
message StatusRequest {
|
|
||||||
string regexp = 1;
|
|
||||||
}
|
|
||||||
|
|
||||||
message Status {
|
message Status {
|
||||||
google.protobuf.Timestamp started_at = 1 [(gogoproto.stdtime) = true, (gogoproto.nullable) = false];
|
google.protobuf.Timestamp started_at = 1 [(gogoproto.stdtime) = true, (gogoproto.nullable) = false];
|
||||||
google.protobuf.Timestamp updated_at = 2 [(gogoproto.stdtime) = true, (gogoproto.nullable) = false];
|
google.protobuf.Timestamp updated_at = 2 [(gogoproto.stdtime) = true, (gogoproto.nullable) = false];
|
||||||
@ -126,7 +169,20 @@ message Status {
|
|||||||
string expected = 6 [(gogoproto.customtype) = "github.com/opencontainers/go-digest.Digest", (gogoproto.nullable) = false];
|
string expected = 6 [(gogoproto.customtype) = "github.com/opencontainers/go-digest.Digest", (gogoproto.nullable) = false];
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
||||||
|
message StatusRequest {
|
||||||
|
string ref = 1;
|
||||||
|
}
|
||||||
|
|
||||||
message StatusResponse {
|
message StatusResponse {
|
||||||
|
Status status = 1;
|
||||||
|
}
|
||||||
|
|
||||||
|
message ListStatusesRequest {
|
||||||
|
repeated string filters = 1;
|
||||||
|
}
|
||||||
|
|
||||||
|
message ListStatusesResponse {
|
||||||
repeated Status statuses = 1 [(gogoproto.nullable) = false];
|
repeated Status statuses = 1 [(gogoproto.nullable) = false];
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -157,8 +213,8 @@ enum WriteAction {
|
|||||||
COMMIT = 2 [(gogoproto.enumvalue_customname) = "WriteActionCommit"];
|
COMMIT = 2 [(gogoproto.enumvalue_customname) = "WriteActionCommit"];
|
||||||
}
|
}
|
||||||
|
|
||||||
// WriteRequest writes data to the request ref at offset.
|
// WriteContentRequest writes data to the request ref at offset.
|
||||||
message WriteRequest {
|
message WriteContentRequest {
|
||||||
// Action sets the behavior of the write.
|
// Action sets the behavior of the write.
|
||||||
//
|
//
|
||||||
// When this is a write and the ref is not yet allocated, the ref will be
|
// When this is a write and the ref is not yet allocated, the ref will be
|
||||||
@ -215,8 +271,8 @@ message WriteRequest {
|
|||||||
bytes data = 6;
|
bytes data = 6;
|
||||||
}
|
}
|
||||||
|
|
||||||
// WriteResponse is returned on the culmination of a write call.
|
// WriteContentResponse is returned on the culmination of a write call.
|
||||||
message WriteResponse {
|
message WriteContentResponse {
|
||||||
// Action contains the action for the final message of the stream. A writer
|
// Action contains the action for the final message of the stream. A writer
|
||||||
// should confirm that they match the intended result.
|
// should confirm that they match the intended result.
|
||||||
WriteAction action = 1;
|
WriteAction action = 1;
|
@ -1,12 +1,12 @@
// Code generated by protoc-gen-gogo.
// Code generated by protoc-gen-gogo.
// source: github.com/containerd/containerd/api/services/diff/diff.proto
// source: github.com/containerd/containerd/api/services/diff/v1/diff.proto
// DO NOT EDIT!
// DO NOT EDIT!

/*
/*
Package diff is a generated protocol buffer package.
Package diff is a generated protocol buffer package.

It is generated from these files:
It is generated from these files:
github.com/containerd/containerd/api/services/diff/diff.proto
github.com/containerd/containerd/api/services/diff/v1/diff.proto

It has these top-level messages:
It has these top-level messages:
ApplyRequest
ApplyRequest
@ -22,8 +22,8 @@ import math "math"
import _ "github.com/gogo/protobuf/gogoproto"
import _ "github.com/gogo/protobuf/gogoproto"
import _ "github.com/golang/protobuf/ptypes/empty"
import _ "github.com/golang/protobuf/ptypes/empty"
import _ "github.com/gogo/protobuf/types"
import _ "github.com/gogo/protobuf/types"
import containerd_v1_types "github.com/containerd/containerd/api/types/mount"
import containerd_types "github.com/containerd/containerd/api/types"
import containerd_v1_types1 "github.com/containerd/containerd/api/types/descriptor"
import containerd_types1 "github.com/containerd/containerd/api/types"

import (
import (
context "golang.org/x/net/context"
context "golang.org/x/net/context"
@ -48,8 +48,8 @@ const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package
|
|||||||
|
|
||||||
type ApplyRequest struct {
|
type ApplyRequest struct {
|
||||||
// Diff is the descriptor of the diff to be extracted
|
// Diff is the descriptor of the diff to be extracted
|
||||||
Diff *containerd_v1_types1.Descriptor `protobuf:"bytes,1,opt,name=diff" json:"diff,omitempty"`
|
Diff *containerd_types1.Descriptor `protobuf:"bytes,1,opt,name=diff" json:"diff,omitempty"`
|
||||||
Mounts []*containerd_v1_types.Mount `protobuf:"bytes,2,rep,name=mounts" json:"mounts,omitempty"`
|
Mounts []*containerd_types.Mount `protobuf:"bytes,2,rep,name=mounts" json:"mounts,omitempty"`
|
||||||
}
|
}
|
||||||
|
|
||||||
func (m *ApplyRequest) Reset() { *m = ApplyRequest{} }
|
func (m *ApplyRequest) Reset() { *m = ApplyRequest{} }
|
||||||
@ -60,7 +60,7 @@ type ApplyResponse struct {
|
|||||||
// Applied is the descriptor for the object which was applied.
|
// Applied is the descriptor for the object which was applied.
|
||||||
// If the input was a compressed blob then the result will be
|
// If the input was a compressed blob then the result will be
|
||||||
// the descriptor for the uncompressed blob.
|
// the descriptor for the uncompressed blob.
|
||||||
Applied *containerd_v1_types1.Descriptor `protobuf:"bytes,1,opt,name=applied" json:"applied,omitempty"`
|
Applied *containerd_types1.Descriptor `protobuf:"bytes,1,opt,name=applied" json:"applied,omitempty"`
|
||||||
}
|
}
|
||||||
|
|
||||||
func (m *ApplyResponse) Reset() { *m = ApplyResponse{} }
|
func (m *ApplyResponse) Reset() { *m = ApplyResponse{} }
|
||||||
@ -70,16 +70,16 @@ func (*ApplyResponse) Descriptor() ([]byte, []int) { return fileDescriptorDiff,
|
|||||||
type DiffRequest struct {
|
type DiffRequest struct {
|
||||||
// Left are the mounts which represent the older copy
|
// Left are the mounts which represent the older copy
|
||||||
// in which is the base of the computed changes.
|
// in which is the base of the computed changes.
|
||||||
Left []*containerd_v1_types.Mount `protobuf:"bytes,1,rep,name=left" json:"left,omitempty"`
|
Left []*containerd_types.Mount `protobuf:"bytes,1,rep,name=left" json:"left,omitempty"`
|
||||||
// Right are the mounts which represents the newer copy
|
// Right are the mounts which represents the newer copy
|
||||||
// in which changes from the left were made into.
|
// in which changes from the left were made into.
|
||||||
Right []*containerd_v1_types.Mount `protobuf:"bytes,2,rep,name=right" json:"right,omitempty"`
|
Right []*containerd_types.Mount `protobuf:"bytes,2,rep,name=right" json:"right,omitempty"`
|
||||||
// MediaType is the media type descriptor for the created diff
|
// MediaType is the media type descriptor for the created diff
|
||||||
// object
|
// object
|
||||||
MediaType string `protobuf:"bytes,3,opt,name=media_type,json=mediaType,proto3" json:"media_type,omitempty"`
|
MediaType string `protobuf:"bytes,3,opt,name=media_type,json=mediaType,proto3" json:"media_type,omitempty"`
|
||||||
// Ref identifies the pre-commit content store object. This
|
// Ref identifies the pre-commit content store object. This
|
||||||
// reference can be used to get the status from the content store.
|
// reference can be used to get the status from the content store.
|
||||||
Ref string `protobuf:"bytes,5,opt,name=ref,proto3" json:"ref,omitempty"`
|
Ref string `protobuf:"bytes,4,opt,name=ref,proto3" json:"ref,omitempty"`
|
||||||
}
|
}
|
||||||
|
|
||||||
func (m *DiffRequest) Reset() { *m = DiffRequest{} }
|
func (m *DiffRequest) Reset() { *m = DiffRequest{} }
|
||||||
@ -88,7 +88,7 @@ func (*DiffRequest) Descriptor() ([]byte, []int) { return fileDescriptorDiff, []
|
|||||||
|
|
||||||
type DiffResponse struct {
|
type DiffResponse struct {
|
||||||
// Diff is the descriptor of the diff which can be applied
|
// Diff is the descriptor of the diff which can be applied
|
||||||
Diff *containerd_v1_types1.Descriptor `protobuf:"bytes,3,opt,name=diff" json:"diff,omitempty"`
|
Diff *containerd_types1.Descriptor `protobuf:"bytes,3,opt,name=diff" json:"diff,omitempty"`
|
||||||
}
|
}
|
||||||
|
|
||||||
func (m *DiffResponse) Reset() { *m = DiffResponse{} }
|
func (m *DiffResponse) Reset() { *m = DiffResponse{} }
|
||||||
@ -96,10 +96,10 @@ func (*DiffResponse) ProtoMessage() {}
|
|||||||
func (*DiffResponse) Descriptor() ([]byte, []int) { return fileDescriptorDiff, []int{3} }
|
func (*DiffResponse) Descriptor() ([]byte, []int) { return fileDescriptorDiff, []int{3} }
|
||||||
|
|
||||||
func init() {
|
func init() {
|
||||||
proto.RegisterType((*ApplyRequest)(nil), "containerd.v1.ApplyRequest")
|
proto.RegisterType((*ApplyRequest)(nil), "containerd.services.diff.v1.ApplyRequest")
|
||||||
proto.RegisterType((*ApplyResponse)(nil), "containerd.v1.ApplyResponse")
|
proto.RegisterType((*ApplyResponse)(nil), "containerd.services.diff.v1.ApplyResponse")
|
||||||
proto.RegisterType((*DiffRequest)(nil), "containerd.v1.DiffRequest")
|
proto.RegisterType((*DiffRequest)(nil), "containerd.services.diff.v1.DiffRequest")
|
||||||
proto.RegisterType((*DiffResponse)(nil), "containerd.v1.DiffResponse")
|
proto.RegisterType((*DiffResponse)(nil), "containerd.services.diff.v1.DiffResponse")
|
||||||
}
|
}
|
||||||
|
|
||||||
// Reference imports to suppress errors if they are not otherwise used.
|
// Reference imports to suppress errors if they are not otherwise used.
|
||||||
@ -132,7 +132,7 @@ func NewDiffClient(cc *grpc.ClientConn) DiffClient {
|
|||||||
|
|
||||||
func (c *diffClient) Apply(ctx context.Context, in *ApplyRequest, opts ...grpc.CallOption) (*ApplyResponse, error) {
|
func (c *diffClient) Apply(ctx context.Context, in *ApplyRequest, opts ...grpc.CallOption) (*ApplyResponse, error) {
|
||||||
out := new(ApplyResponse)
|
out := new(ApplyResponse)
|
||||||
err := grpc.Invoke(ctx, "/containerd.v1.Diff/Apply", in, out, c.cc, opts...)
|
err := grpc.Invoke(ctx, "/containerd.services.diff.v1.Diff/Apply", in, out, c.cc, opts...)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
@ -141,7 +141,7 @@ func (c *diffClient) Apply(ctx context.Context, in *ApplyRequest, opts ...grpc.C
|
|||||||
|
|
||||||
func (c *diffClient) Diff(ctx context.Context, in *DiffRequest, opts ...grpc.CallOption) (*DiffResponse, error) {
|
func (c *diffClient) Diff(ctx context.Context, in *DiffRequest, opts ...grpc.CallOption) (*DiffResponse, error) {
|
||||||
out := new(DiffResponse)
|
out := new(DiffResponse)
|
||||||
err := grpc.Invoke(ctx, "/containerd.v1.Diff/Diff", in, out, c.cc, opts...)
|
err := grpc.Invoke(ctx, "/containerd.services.diff.v1.Diff/Diff", in, out, c.cc, opts...)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
@ -174,7 +174,7 @@ func _Diff_Apply_Handler(srv interface{}, ctx context.Context, dec func(interfac
|
|||||||
}
|
}
|
||||||
info := &grpc.UnaryServerInfo{
|
info := &grpc.UnaryServerInfo{
|
||||||
Server: srv,
|
Server: srv,
|
||||||
FullMethod: "/containerd.v1.Diff/Apply",
|
FullMethod: "/containerd.services.diff.v1.Diff/Apply",
|
||||||
}
|
}
|
||||||
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
|
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
|
||||||
return srv.(DiffServer).Apply(ctx, req.(*ApplyRequest))
|
return srv.(DiffServer).Apply(ctx, req.(*ApplyRequest))
|
||||||
@ -192,7 +192,7 @@ func _Diff_Diff_Handler(srv interface{}, ctx context.Context, dec func(interface
|
|||||||
}
|
}
|
||||||
info := &grpc.UnaryServerInfo{
|
info := &grpc.UnaryServerInfo{
|
||||||
Server: srv,
|
Server: srv,
|
||||||
FullMethod: "/containerd.v1.Diff/Diff",
|
FullMethod: "/containerd.services.diff.v1.Diff/Diff",
|
||||||
}
|
}
|
||||||
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
|
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
|
||||||
return srv.(DiffServer).Diff(ctx, req.(*DiffRequest))
|
return srv.(DiffServer).Diff(ctx, req.(*DiffRequest))
|
||||||
@ -201,7 +201,7 @@ func _Diff_Diff_Handler(srv interface{}, ctx context.Context, dec func(interface
|
|||||||
}
|
}
|
||||||
|
|
||||||
var _Diff_serviceDesc = grpc.ServiceDesc{
|
var _Diff_serviceDesc = grpc.ServiceDesc{
|
||||||
ServiceName: "containerd.v1.Diff",
|
ServiceName: "containerd.services.diff.v1.Diff",
|
||||||
HandlerType: (*DiffServer)(nil),
|
HandlerType: (*DiffServer)(nil),
|
||||||
Methods: []grpc.MethodDesc{
|
Methods: []grpc.MethodDesc{
|
||||||
{
|
{
|
||||||
@ -214,7 +214,7 @@ var _Diff_serviceDesc = grpc.ServiceDesc{
|
|||||||
},
|
},
|
||||||
},
|
},
|
||||||
Streams: []grpc.StreamDesc{},
|
Streams: []grpc.StreamDesc{},
|
||||||
Metadata: "github.com/containerd/containerd/api/services/diff/diff.proto",
|
Metadata: "github.com/containerd/containerd/api/services/diff/v1/diff.proto",
|
||||||
}
|
}
|
||||||
|
|
||||||
func (m *ApplyRequest) Marshal() (dAtA []byte, err error) {
|
func (m *ApplyRequest) Marshal() (dAtA []byte, err error) {
|
||||||
@ -331,7 +331,7 @@ func (m *DiffRequest) MarshalTo(dAtA []byte) (int, error) {
|
|||||||
i += copy(dAtA[i:], m.MediaType)
|
i += copy(dAtA[i:], m.MediaType)
|
||||||
}
|
}
|
||||||
if len(m.Ref) > 0 {
|
if len(m.Ref) > 0 {
|
||||||
dAtA[i] = 0x2a
|
dAtA[i] = 0x22
|
||||||
i++
|
i++
|
||||||
i = encodeVarintDiff(dAtA, i, uint64(len(m.Ref)))
|
i = encodeVarintDiff(dAtA, i, uint64(len(m.Ref)))
|
||||||
i += copy(dAtA[i:], m.Ref)
|
i += copy(dAtA[i:], m.Ref)
|
||||||
@ -474,8 +474,8 @@ func (this *ApplyRequest) String() string {
|
|||||||
return "nil"
|
return "nil"
|
||||||
}
|
}
|
||||||
s := strings.Join([]string{`&ApplyRequest{`,
|
s := strings.Join([]string{`&ApplyRequest{`,
|
||||||
`Diff:` + strings.Replace(fmt.Sprintf("%v", this.Diff), "Descriptor", "containerd_v1_types1.Descriptor", 1) + `,`,
|
`Diff:` + strings.Replace(fmt.Sprintf("%v", this.Diff), "Descriptor", "containerd_types1.Descriptor", 1) + `,`,
|
||||||
`Mounts:` + strings.Replace(fmt.Sprintf("%v", this.Mounts), "Mount", "containerd_v1_types.Mount", 1) + `,`,
|
`Mounts:` + strings.Replace(fmt.Sprintf("%v", this.Mounts), "Mount", "containerd_types.Mount", 1) + `,`,
|
||||||
`}`,
|
`}`,
|
||||||
}, "")
|
}, "")
|
||||||
return s
|
return s
|
||||||
@ -485,7 +485,7 @@ func (this *ApplyResponse) String() string {
|
|||||||
return "nil"
|
return "nil"
|
||||||
}
|
}
|
||||||
s := strings.Join([]string{`&ApplyResponse{`,
|
s := strings.Join([]string{`&ApplyResponse{`,
|
||||||
`Applied:` + strings.Replace(fmt.Sprintf("%v", this.Applied), "Descriptor", "containerd_v1_types1.Descriptor", 1) + `,`,
|
`Applied:` + strings.Replace(fmt.Sprintf("%v", this.Applied), "Descriptor", "containerd_types1.Descriptor", 1) + `,`,
|
||||||
`}`,
|
`}`,
|
||||||
}, "")
|
}, "")
|
||||||
return s
|
return s
|
||||||
@ -495,8 +495,8 @@ func (this *DiffRequest) String() string {
|
|||||||
return "nil"
|
return "nil"
|
||||||
}
|
}
|
||||||
s := strings.Join([]string{`&DiffRequest{`,
|
s := strings.Join([]string{`&DiffRequest{`,
|
||||||
`Left:` + strings.Replace(fmt.Sprintf("%v", this.Left), "Mount", "containerd_v1_types.Mount", 1) + `,`,
|
`Left:` + strings.Replace(fmt.Sprintf("%v", this.Left), "Mount", "containerd_types.Mount", 1) + `,`,
|
||||||
`Right:` + strings.Replace(fmt.Sprintf("%v", this.Right), "Mount", "containerd_v1_types.Mount", 1) + `,`,
|
`Right:` + strings.Replace(fmt.Sprintf("%v", this.Right), "Mount", "containerd_types.Mount", 1) + `,`,
|
||||||
`MediaType:` + fmt.Sprintf("%v", this.MediaType) + `,`,
|
`MediaType:` + fmt.Sprintf("%v", this.MediaType) + `,`,
|
||||||
`Ref:` + fmt.Sprintf("%v", this.Ref) + `,`,
|
`Ref:` + fmt.Sprintf("%v", this.Ref) + `,`,
|
||||||
`}`,
|
`}`,
|
||||||
@ -508,7 +508,7 @@ func (this *DiffResponse) String() string {
|
|||||||
return "nil"
|
return "nil"
|
||||||
}
|
}
|
||||||
s := strings.Join([]string{`&DiffResponse{`,
|
s := strings.Join([]string{`&DiffResponse{`,
|
||||||
`Diff:` + strings.Replace(fmt.Sprintf("%v", this.Diff), "Descriptor", "containerd_v1_types1.Descriptor", 1) + `,`,
|
`Diff:` + strings.Replace(fmt.Sprintf("%v", this.Diff), "Descriptor", "containerd_types1.Descriptor", 1) + `,`,
|
||||||
`}`,
|
`}`,
|
||||||
}, "")
|
}, "")
|
||||||
return s
|
return s
|
||||||
@ -577,7 +577,7 @@ func (m *ApplyRequest) Unmarshal(dAtA []byte) error {
|
|||||||
return io.ErrUnexpectedEOF
|
return io.ErrUnexpectedEOF
|
||||||
}
|
}
|
||||||
if m.Diff == nil {
|
if m.Diff == nil {
|
||||||
m.Diff = &containerd_v1_types1.Descriptor{}
|
m.Diff = &containerd_types1.Descriptor{}
|
||||||
}
|
}
|
||||||
if err := m.Diff.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
|
if err := m.Diff.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
|
||||||
return err
|
return err
|
||||||
@ -609,7 +609,7 @@ func (m *ApplyRequest) Unmarshal(dAtA []byte) error {
|
|||||||
if postIndex > l {
|
if postIndex > l {
|
||||||
return io.ErrUnexpectedEOF
|
return io.ErrUnexpectedEOF
|
||||||
}
|
}
|
||||||
m.Mounts = append(m.Mounts, &containerd_v1_types.Mount{})
|
m.Mounts = append(m.Mounts, &containerd_types.Mount{})
|
||||||
if err := m.Mounts[len(m.Mounts)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
|
if err := m.Mounts[len(m.Mounts)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
@ -691,7 +691,7 @@ func (m *ApplyResponse) Unmarshal(dAtA []byte) error {
|
|||||||
return io.ErrUnexpectedEOF
|
return io.ErrUnexpectedEOF
|
||||||
}
|
}
|
||||||
if m.Applied == nil {
|
if m.Applied == nil {
|
||||||
m.Applied = &containerd_v1_types1.Descriptor{}
|
m.Applied = &containerd_types1.Descriptor{}
|
||||||
}
|
}
|
||||||
if err := m.Applied.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
|
if err := m.Applied.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
|
||||||
return err
|
return err
|
||||||
@ -773,7 +773,7 @@ func (m *DiffRequest) Unmarshal(dAtA []byte) error {
|
|||||||
if postIndex > l {
|
if postIndex > l {
|
||||||
return io.ErrUnexpectedEOF
|
return io.ErrUnexpectedEOF
|
||||||
}
|
}
|
||||||
m.Left = append(m.Left, &containerd_v1_types.Mount{})
|
m.Left = append(m.Left, &containerd_types.Mount{})
|
||||||
if err := m.Left[len(m.Left)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
|
if err := m.Left[len(m.Left)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
@ -804,7 +804,7 @@ func (m *DiffRequest) Unmarshal(dAtA []byte) error {
|
|||||||
if postIndex > l {
|
if postIndex > l {
|
||||||
return io.ErrUnexpectedEOF
|
return io.ErrUnexpectedEOF
|
||||||
}
|
}
|
||||||
m.Right = append(m.Right, &containerd_v1_types.Mount{})
|
m.Right = append(m.Right, &containerd_types.Mount{})
|
||||||
if err := m.Right[len(m.Right)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
|
if err := m.Right[len(m.Right)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
@ -838,7 +838,7 @@ func (m *DiffRequest) Unmarshal(dAtA []byte) error {
|
|||||||
}
|
}
|
||||||
m.MediaType = string(dAtA[iNdEx:postIndex])
|
m.MediaType = string(dAtA[iNdEx:postIndex])
|
||||||
iNdEx = postIndex
|
iNdEx = postIndex
|
||||||
case 5:
|
case 4:
|
||||||
if wireType != 2 {
|
if wireType != 2 {
|
||||||
return fmt.Errorf("proto: wrong wireType = %d for field Ref", wireType)
|
return fmt.Errorf("proto: wrong wireType = %d for field Ref", wireType)
|
||||||
}
|
}
|
||||||
@@ -944,7 +944,7 @@ func (m *DiffResponse) Unmarshal(dAtA []byte) error {
 				return io.ErrUnexpectedEOF
 			}
 			if m.Diff == nil {
-				m.Diff = &containerd_v1_types1.Descriptor{}
+				m.Diff = &containerd_types1.Descriptor{}
 			}
 			if err := m.Diff.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
 				return err
@@ -1077,35 +1077,36 @@ var (
 )
 
 func init() {
-	proto.RegisterFile("github.com/containerd/containerd/api/services/diff/diff.proto", fileDescriptorDiff)
+	proto.RegisterFile("github.com/containerd/containerd/api/services/diff/v1/diff.proto", fileDescriptorDiff)
 }
 
 var fileDescriptorDiff = []byte{
-	// 407 bytes of a gzipped FileDescriptorProto
+	// 427 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x94, 0x52, 0x41, 0xcf, 0x93, 0x40,
|
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x9c, 0x93, 0x31, 0x6f, 0xd4, 0x30,
|
||||||
0x10, 0xfd, 0x56, 0xda, 0xcf, 0x74, 0xdb, 0x26, 0x66, 0xe3, 0x81, 0x50, 0xa5, 0x0d, 0xa7, 0x9e,
|
0x14, 0xc7, 0x6b, 0xee, 0x5a, 0x54, 0x5f, 0x91, 0x90, 0x85, 0x44, 0x94, 0x42, 0x38, 0x65, 0x4a,
|
||||||
0x40, 0xe9, 0xc9, 0x44, 0x63, 0xac, 0x8d, 0x07, 0x13, 0x2f, 0xc4, 0xbb, 0xa1, 0x30, 0xd0, 0x4d,
|
0x41, 0xd8, 0xf4, 0x90, 0x3a, 0xd0, 0xa5, 0xa0, 0x4a, 0x4c, 0x2c, 0x51, 0x27, 0x90, 0x40, 0xb9,
|
||||||
0x80, 0x5d, 0xd9, 0xa5, 0x86, 0x9b, 0x77, 0xff, 0x84, 0x3f, 0xa7, 0x47, 0x8f, 0x1e, 0x2d, 0xbf,
|
0xe4, 0x25, 0xb5, 0x94, 0xc4, 0x6e, 0xec, 0x9c, 0x94, 0x8d, 0xcf, 0xc1, 0xd7, 0x61, 0xe9, 0xc8,
|
||||||
0xc4, 0xb0, 0x6c, 0x15, 0x9b, 0x26, 0xb6, 0x97, 0xcd, 0xb0, 0xef, 0xcd, 0x9b, 0x37, 0x8f, 0xc5,
|
0xc8, 0x48, 0xf3, 0x49, 0x50, 0x1c, 0x07, 0x22, 0x90, 0x8e, 0xd0, 0xc9, 0x2f, 0x7e, 0xbf, 0xff,
|
||||||
0xaf, 0x52, 0x2a, 0x77, 0xd5, 0xd6, 0x8d, 0x58, 0xee, 0x45, 0xac, 0x90, 0x21, 0x2d, 0xa0, 0x8c,
|
0x7b, 0x7f, 0xbf, 0xd8, 0xf8, 0x2c, 0xe3, 0xfa, 0xb2, 0x5e, 0xd3, 0x58, 0x14, 0x2c, 0x16, 0xa5,
|
||||||
0xfb, 0x65, 0xc8, 0xa9, 0x27, 0xa0, 0xdc, 0xd3, 0x08, 0x84, 0x17, 0xd3, 0x24, 0x51, 0x87, 0xcb,
|
0x8e, 0x78, 0x09, 0x55, 0x32, 0x0e, 0x23, 0xc9, 0x99, 0x82, 0x6a, 0xc3, 0x63, 0x50, 0x2c, 0xe1,
|
||||||
0x4b, 0x26, 0x19, 0x99, 0xfe, 0x25, 0xba, 0xfb, 0xe7, 0xd6, 0xe3, 0x94, 0xa5, 0x4c, 0x21, 0x5e,
|
0x69, 0xca, 0x36, 0xc7, 0x66, 0xa5, 0xb2, 0x12, 0x5a, 0x90, 0xc3, 0xdf, 0x2c, 0x1d, 0x38, 0x6a,
|
||||||
0x5b, 0x75, 0x24, 0x6b, 0x96, 0x32, 0x96, 0x66, 0xe0, 0xa9, 0xaf, 0x6d, 0x95, 0x78, 0x90, 0x73,
|
0xf2, 0x9b, 0x63, 0xf7, 0x41, 0x26, 0x32, 0x61, 0x38, 0xd6, 0x45, 0xbd, 0xc4, 0x3d, 0xcc, 0x84,
|
||||||
0x59, 0x6b, 0x70, 0x7e, 0x0e, 0x4a, 0x9a, 0x83, 0x90, 0x61, 0xce, 0x35, 0xe1, 0xe5, 0x55, 0x0e,
|
0xc8, 0x72, 0x60, 0xe6, 0x6b, 0x5d, 0xa7, 0x0c, 0x0a, 0xa9, 0x1b, 0x9b, 0x7c, 0xf2, 0x67, 0x52,
|
||||||
0x65, 0xcd, 0x41, 0x78, 0x39, 0xab, 0x0a, 0xd9, 0x9d, 0xba, 0xfb, 0xdd, 0x0d, 0xdd, 0x31, 0x88,
|
0xf3, 0x02, 0x94, 0x8e, 0x0a, 0x69, 0x81, 0x93, 0x49, 0x96, 0x75, 0x23, 0x41, 0xb1, 0x42, 0xd4,
|
||||||
0xa8, 0xa4, 0x5c, 0xb2, 0xb2, 0x57, 0x76, 0x3a, 0xce, 0x17, 0x3c, 0x79, 0xc3, 0x79, 0x56, 0x07,
|
0xa5, 0xb6, 0xba, 0xd3, 0xff, 0xd0, 0x25, 0xa0, 0xe2, 0x8a, 0x4b, 0x2d, 0xaa, 0x5e, 0xec, 0x5f,
|
||||||
0xf0, 0xb9, 0x02, 0x21, 0xc9, 0x0a, 0x0f, 0xda, 0x18, 0x4c, 0xb4, 0x40, 0xcb, 0xb1, 0x3f, 0x77,
|
0xe1, 0x83, 0xd7, 0x52, 0xe6, 0x4d, 0x08, 0x57, 0x35, 0x28, 0x4d, 0x5e, 0xe0, 0x79, 0x77, 0x46,
|
||||||
0xff, 0xc9, 0xc1, 0x55, 0x7a, 0xee, 0xe6, 0x8f, 0x48, 0xa0, 0xc8, 0xc4, 0xc7, 0xf7, 0xca, 0x9b,
|
0x07, 0x2d, 0x51, 0xb0, 0x58, 0x3d, 0xa2, 0xa3, 0x21, 0x98, 0x0a, 0xf4, 0xfc, 0x57, 0x85, 0xd0,
|
||||||
0x30, 0x1f, 0x2c, 0x8c, 0xe5, 0xd8, 0xb7, 0x2e, 0xb6, 0x7d, 0x68, 0x29, 0x81, 0x66, 0x3a, 0xef,
|
0x90, 0x84, 0xe1, 0x3d, 0xe3, 0x46, 0x39, 0x77, 0x96, 0xb3, 0x60, 0xb1, 0x7a, 0xf8, 0xb7, 0xe6,
|
||||||
0xf1, 0x54, 0x0f, 0x16, 0x9c, 0x15, 0x02, 0xc8, 0x0b, 0xfc, 0x30, 0xe4, 0x3c, 0xa3, 0x10, 0x5f,
|
0x5d, 0x97, 0x0f, 0x2d, 0xe6, 0xbf, 0xc5, 0xf7, 0x6c, 0x4b, 0x25, 0x45, 0xa9, 0x80, 0x9c, 0xe0,
|
||||||
0x3b, 0xfc, 0xc4, 0x77, 0xbe, 0x23, 0x3c, 0xde, 0xd0, 0x24, 0x39, 0x2d, 0xe1, 0xe2, 0x41, 0x06,
|
0xbb, 0x91, 0x94, 0x39, 0x87, 0x64, 0x52, 0xdb, 0x01, 0xf6, 0xbf, 0x20, 0xbc, 0x38, 0xe7, 0x69,
|
||||||
0x89, 0x34, 0xd1, 0x7f, 0xdd, 0x28, 0x1e, 0x79, 0x86, 0x87, 0x25, 0x4d, 0x77, 0xf2, 0x0a, 0xfb,
|
0x3a, 0x78, 0x7f, 0x86, 0xe7, 0x39, 0xa4, 0xda, 0x41, 0xdb, 0x7d, 0x18, 0x88, 0x3c, 0xc7, 0xbb,
|
||||||
0x1d, 0x91, 0x3c, 0xc5, 0x38, 0x87, 0x98, 0x86, 0x9f, 0x5a, 0xcc, 0x34, 0x16, 0x68, 0x39, 0x0a,
|
0x15, 0xcf, 0x2e, 0xf5, 0xbf, 0x5c, 0xf7, 0x14, 0x79, 0x8c, 0x71, 0x01, 0x09, 0x8f, 0x3e, 0x75,
|
||||||
0x46, 0xea, 0xe6, 0x63, 0xcd, 0x81, 0x3c, 0xc2, 0x46, 0x09, 0x89, 0x39, 0x54, 0xf7, 0x6d, 0xe9,
|
0x39, 0x67, 0xb6, 0x44, 0xc1, 0x7e, 0xb8, 0x6f, 0x76, 0x2e, 0x1a, 0x09, 0xe4, 0x3e, 0x9e, 0x55,
|
||||||
0xbc, 0xc5, 0x93, 0xce, 0xa1, 0xde, 0xf6, 0x94, 0xb3, 0x71, 0x43, 0xce, 0xfe, 0x37, 0x84, 0x07,
|
0x90, 0x3a, 0x73, 0xb3, 0xdf, 0x85, 0xfe, 0x19, 0x3e, 0xe8, 0xbd, 0xd9, 0x43, 0x0e, 0x83, 0x9d,
|
||||||
0xad, 0x0a, 0x59, 0xe3, 0xa1, 0x0a, 0x8f, 0xcc, 0xce, 0x1a, 0xfb, 0xff, 0xd2, 0x7a, 0x72, 0x19,
|
0x4d, 0x1d, 0xec, 0xea, 0x2b, 0xc2, 0xf3, 0xae, 0x04, 0xf9, 0x88, 0x77, 0xcd, 0xc0, 0xc8, 0x11,
|
||||||
0xd4, 0x0e, 0x5e, 0x6b, 0xad, 0xf3, 0x6d, 0x7b, 0x41, 0x5a, 0xb3, 0x8b, 0x58, 0x27, 0xb0, 0x36,
|
0xdd, 0x72, 0x27, 0xe9, 0xf8, 0x3f, 0xba, 0x4f, 0xa7, 0xa0, 0xd6, 0xda, 0x07, 0xdb, 0x27, 0xd8,
|
||||||
0x0f, 0x47, 0xfb, 0xee, 0xe7, 0xd1, 0xbe, 0xfb, 0xda, 0xd8, 0xe8, 0xd0, 0xd8, 0xe8, 0x47, 0x63,
|
0xaa, 0x19, 0x4d, 0xda, 0x3d, 0x9a, 0x40, 0xf6, 0xc5, 0xdf, 0x5c, 0x5c, 0xdf, 0x78, 0x3b, 0xdf,
|
||||||
0xa3, 0x5f, 0x8d, 0x8d, 0xb6, 0xf7, 0xea, 0x6d, 0xad, 0x7e, 0x07, 0x00, 0x00, 0xff, 0xff, 0x61,
|
0x6f, 0xbc, 0x9d, 0xcf, 0xad, 0x87, 0xae, 0x5b, 0x0f, 0x7d, 0x6b, 0x3d, 0xf4, 0xa3, 0xf5, 0xd0,
|
||||||
0x65, 0x17, 0x47, 0x85, 0x03, 0x00, 0x00,
|
0xfb, 0x57, 0xb7, 0x7a, 0xa2, 0xa7, 0xdd, 0xba, 0xde, 0x33, 0xb7, 0xf7, 0xe5, 0xcf, 0x00, 0x00,
|
||||||
|
0x00, 0xff, 0xff, 0x44, 0x8b, 0x75, 0x5d, 0xe7, 0x03, 0x00, 0x00,
|
||||||
}
|
}
|
@@ -1,12 +1,14 @@
 syntax = "proto3";
 
-package containerd.v1;
+package containerd.services.diff.v1;
 
 import "gogoproto/gogo.proto";
 import "google/protobuf/empty.proto";
 import "google/protobuf/timestamp.proto";
-import "github.com/containerd/containerd/api/types/mount/mount.proto";
+import "github.com/containerd/containerd/api/types/mount.proto";
-import "github.com/containerd/containerd/api/types/descriptor/descriptor.proto";
+import "github.com/containerd/containerd/api/types/descriptor.proto";
+
+option go_package = "github.com/containerd/containerd/api/services/diff/v1;diff";
 
 // Diff service creates and applies diffs
 service Diff {
@@ -22,26 +24,26 @@ service Diff {
 
 message ApplyRequest {
 	// Diff is the descriptor of the diff to be extracted
-	containerd.v1.types.Descriptor diff = 1;
+	containerd.types.Descriptor diff = 1;
 
-	repeated containerd.v1.types.Mount mounts = 2;
+	repeated containerd.types.Mount mounts = 2;
 }
 
 message ApplyResponse {
 	// Applied is the descriptor for the object which was applied.
 	// If the input was a compressed blob then the result will be
 	// the descriptor for the uncompressed blob.
-	containerd.v1.types.Descriptor applied = 1;
+	containerd.types.Descriptor applied = 1;
 }
 
 message DiffRequest {
 	// Left are the mounts which represent the older copy
 	// in which is the base of the computed changes.
-	repeated containerd.v1.types.Mount left = 1;
+	repeated containerd.types.Mount left = 1;
 
 	// Right are the mounts which represents the newer copy
 	// in which changes from the left were made into.
-	repeated containerd.v1.types.Mount right = 2;
+	repeated containerd.types.Mount right = 2;
 
 	// MediaType is the media type descriptor for the created diff
 	// object
@@ -49,10 +51,10 @@ message DiffRequest {
 
 	// Ref identifies the pre-commit content store object. This
 	// reference can be used to get the status from the content store.
-	string ref = 5;
+	string ref = 4;
 }
 
 message DiffResponse {
 	// Diff is the descriptor of the diff which can be applied
-	containerd.v1.types.Descriptor diff = 3;
+	containerd.types.Descriptor diff = 3;
 }
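For orientation, here is a minimal sketch of building a request against the regenerated diff API. The `diff` import alias matches the go_package added above; the media type and ref values are purely illustrative, and the Left/Right mount lists (which now take the renamed containerd.types.Mount) are omitted to keep the example small.

```go
package main

import (
	"fmt"

	diff "github.com/containerd/containerd/api/services/diff/v1"
)

func main() {
	// DiffRequest now lives in containerd.services.diff.v1; note that the
	// "ref" field moved from tag 5 to tag 4 in this update. Left and Right
	// (not set here) take []*types.Mount values from the containerd.types package.
	req := &diff.DiffRequest{
		MediaType: "application/vnd.oci.image.layer.v1.tar+gzip", // example media type
		Ref:       "example-diff-ref",                            // hypothetical content-store ref
	}
	fmt.Printf("diff request: media type %q, ref %q\n", req.MediaType, req.Ref)
}
```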
1161  vendor/github.com/containerd/containerd/api/services/events/v1/container.pb.go (generated, vendored, new file)
File diff suppressed because it is too large.
29  vendor/github.com/containerd/containerd/api/services/events/v1/container.proto (generated, vendored, new file)
@@ -0,0 +1,29 @@
+syntax = "proto3";
+
+package containerd.services.events.v1;
+
+import "gogoproto/gogo.proto";
+import "google/protobuf/any.proto";
+
+option go_package = "github.com/containerd/containerd/api/services/events/v1;events";
+
+message ContainerCreate {
+	string id = 1;
+	string image = 2;
+	message Runtime {
+		string name = 1;
+		google.protobuf.Any options = 2;
+	}
+	Runtime runtime = 3;
+}
+
+message ContainerUpdate {
+	string id = 1;
+	string image = 2;
+	map<string, string> labels = 3;
+	string rootfs = 4 [(gogoproto.customname) = "RootFS"];
+}
+
+message ContainerDelete {
+	string id = 1;
+}
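container.proto only defines event payloads; there is no service here, and these messages are published through the events service introduced later in this commit. Below is a rough sketch of serializing one payload with the gogo runtime that the generated code imports. The `events` import alias and the Go field name `ID` are assumptions inferred from the proto definition, not verified against the generated file.

```go
package main

import (
	"fmt"

	events "github.com/containerd/containerd/api/services/events/v1"
	"github.com/gogo/protobuf/proto"
)

func main() {
	// A ContainerDelete payload; the events service would wrap the marshaled
	// bytes in a google.protobuf.Any inside an Envelope before delivery.
	ev := &events.ContainerDelete{ID: "redis-1"} // field name assumed from `string id = 1`

	data, err := proto.Marshal(ev)
	if err != nil {
		panic(err)
	}

	var out events.ContainerDelete
	if err := proto.Unmarshal(data, &out); err != nil {
		panic(err)
	}
	fmt.Println("round-tripped container id:", out.ID)
}
```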
331  vendor/github.com/containerd/containerd/api/services/events/v1/content.pb.go (generated, vendored, new file)
@@ -0,0 +1,331 @@
// Code generated by protoc-gen-gogo.
|
||||||
|
// source: github.com/containerd/containerd/api/services/events/v1/content.proto
|
||||||
|
// DO NOT EDIT!
|
||||||
|
|
||||||
|
package events
|
||||||
|
|
||||||
|
import proto "github.com/gogo/protobuf/proto"
|
||||||
|
import fmt "fmt"
|
||||||
|
import math "math"
|
||||||
|
import _ "github.com/gogo/protobuf/gogoproto"
|
||||||
|
|
||||||
|
import github_com_opencontainers_go_digest "github.com/opencontainers/go-digest"
|
||||||
|
|
||||||
|
import strings "strings"
|
||||||
|
import reflect "reflect"
|
||||||
|
|
||||||
|
import io "io"
|
||||||
|
|
||||||
|
// Reference imports to suppress errors if they are not otherwise used.
|
||||||
|
var _ = proto.Marshal
|
||||||
|
var _ = fmt.Errorf
|
||||||
|
var _ = math.Inf
|
||||||
|
|
||||||
|
type ContentDelete struct {
|
||||||
|
Digest github_com_opencontainers_go_digest.Digest `protobuf:"bytes,1,opt,name=digest,proto3,customtype=github.com/opencontainers/go-digest.Digest" json:"digest"`
|
||||||
|
}
|
||||||
|
|
||||||
|
func (m *ContentDelete) Reset() { *m = ContentDelete{} }
|
||||||
|
func (*ContentDelete) ProtoMessage() {}
|
||||||
|
func (*ContentDelete) Descriptor() ([]byte, []int) { return fileDescriptorContent, []int{0} }
|
||||||
|
|
||||||
|
func init() {
|
||||||
|
proto.RegisterType((*ContentDelete)(nil), "containerd.services.events.v1.ContentDelete")
|
||||||
|
}
|
||||||
|
func (m *ContentDelete) Marshal() (dAtA []byte, err error) {
|
||||||
|
size := m.Size()
|
||||||
|
dAtA = make([]byte, size)
|
||||||
|
n, err := m.MarshalTo(dAtA)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
return dAtA[:n], nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (m *ContentDelete) MarshalTo(dAtA []byte) (int, error) {
|
||||||
|
var i int
|
||||||
|
_ = i
|
||||||
|
var l int
|
||||||
|
_ = l
|
||||||
|
if len(m.Digest) > 0 {
|
||||||
|
dAtA[i] = 0xa
|
||||||
|
i++
|
||||||
|
i = encodeVarintContent(dAtA, i, uint64(len(m.Digest)))
|
||||||
|
i += copy(dAtA[i:], m.Digest)
|
||||||
|
}
|
||||||
|
return i, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func encodeFixed64Content(dAtA []byte, offset int, v uint64) int {
|
||||||
|
dAtA[offset] = uint8(v)
|
||||||
|
dAtA[offset+1] = uint8(v >> 8)
|
||||||
|
dAtA[offset+2] = uint8(v >> 16)
|
||||||
|
dAtA[offset+3] = uint8(v >> 24)
|
||||||
|
dAtA[offset+4] = uint8(v >> 32)
|
||||||
|
dAtA[offset+5] = uint8(v >> 40)
|
||||||
|
dAtA[offset+6] = uint8(v >> 48)
|
||||||
|
dAtA[offset+7] = uint8(v >> 56)
|
||||||
|
return offset + 8
|
||||||
|
}
|
||||||
|
func encodeFixed32Content(dAtA []byte, offset int, v uint32) int {
|
||||||
|
dAtA[offset] = uint8(v)
|
||||||
|
dAtA[offset+1] = uint8(v >> 8)
|
||||||
|
dAtA[offset+2] = uint8(v >> 16)
|
||||||
|
dAtA[offset+3] = uint8(v >> 24)
|
||||||
|
return offset + 4
|
||||||
|
}
|
||||||
|
func encodeVarintContent(dAtA []byte, offset int, v uint64) int {
|
||||||
|
for v >= 1<<7 {
|
||||||
|
dAtA[offset] = uint8(v&0x7f | 0x80)
|
||||||
|
v >>= 7
|
||||||
|
offset++
|
||||||
|
}
|
||||||
|
dAtA[offset] = uint8(v)
|
||||||
|
return offset + 1
|
||||||
|
}
|
||||||
|
func (m *ContentDelete) Size() (n int) {
|
||||||
|
var l int
|
||||||
|
_ = l
|
||||||
|
l = len(m.Digest)
|
||||||
|
if l > 0 {
|
||||||
|
n += 1 + l + sovContent(uint64(l))
|
||||||
|
}
|
||||||
|
return n
|
||||||
|
}
|
||||||
|
|
||||||
|
func sovContent(x uint64) (n int) {
|
||||||
|
for {
|
||||||
|
n++
|
||||||
|
x >>= 7
|
||||||
|
if x == 0 {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return n
|
||||||
|
}
|
||||||
|
func sozContent(x uint64) (n int) {
|
||||||
|
return sovContent(uint64((x << 1) ^ uint64((int64(x) >> 63))))
|
||||||
|
}
|
||||||
|
func (this *ContentDelete) String() string {
|
||||||
|
if this == nil {
|
||||||
|
return "nil"
|
||||||
|
}
|
||||||
|
s := strings.Join([]string{`&ContentDelete{`,
|
||||||
|
`Digest:` + fmt.Sprintf("%v", this.Digest) + `,`,
|
||||||
|
`}`,
|
||||||
|
}, "")
|
||||||
|
return s
|
||||||
|
}
|
||||||
|
func valueToStringContent(v interface{}) string {
|
||||||
|
rv := reflect.ValueOf(v)
|
||||||
|
if rv.IsNil() {
|
||||||
|
return "nil"
|
||||||
|
}
|
||||||
|
pv := reflect.Indirect(rv).Interface()
|
||||||
|
return fmt.Sprintf("*%v", pv)
|
||||||
|
}
|
||||||
|
func (m *ContentDelete) Unmarshal(dAtA []byte) error {
|
||||||
|
l := len(dAtA)
|
||||||
|
iNdEx := 0
|
||||||
|
for iNdEx < l {
|
||||||
|
preIndex := iNdEx
|
||||||
|
var wire uint64
|
||||||
|
for shift := uint(0); ; shift += 7 {
|
||||||
|
if shift >= 64 {
|
||||||
|
return ErrIntOverflowContent
|
||||||
|
}
|
||||||
|
if iNdEx >= l {
|
||||||
|
return io.ErrUnexpectedEOF
|
||||||
|
}
|
||||||
|
b := dAtA[iNdEx]
|
||||||
|
iNdEx++
|
||||||
|
wire |= (uint64(b) & 0x7F) << shift
|
||||||
|
if b < 0x80 {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
fieldNum := int32(wire >> 3)
|
||||||
|
wireType := int(wire & 0x7)
|
||||||
|
if wireType == 4 {
|
||||||
|
return fmt.Errorf("proto: ContentDelete: wiretype end group for non-group")
|
||||||
|
}
|
||||||
|
if fieldNum <= 0 {
|
||||||
|
return fmt.Errorf("proto: ContentDelete: illegal tag %d (wire type %d)", fieldNum, wire)
|
||||||
|
}
|
||||||
|
switch fieldNum {
|
||||||
|
case 1:
|
||||||
|
if wireType != 2 {
|
||||||
|
return fmt.Errorf("proto: wrong wireType = %d for field Digest", wireType)
|
||||||
|
}
|
||||||
|
var stringLen uint64
|
||||||
|
for shift := uint(0); ; shift += 7 {
|
||||||
|
if shift >= 64 {
|
||||||
|
return ErrIntOverflowContent
|
||||||
|
}
|
||||||
|
if iNdEx >= l {
|
||||||
|
return io.ErrUnexpectedEOF
|
||||||
|
}
|
||||||
|
b := dAtA[iNdEx]
|
||||||
|
iNdEx++
|
||||||
|
stringLen |= (uint64(b) & 0x7F) << shift
|
||||||
|
if b < 0x80 {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
intStringLen := int(stringLen)
|
||||||
|
if intStringLen < 0 {
|
||||||
|
return ErrInvalidLengthContent
|
||||||
|
}
|
||||||
|
postIndex := iNdEx + intStringLen
|
||||||
|
if postIndex > l {
|
||||||
|
return io.ErrUnexpectedEOF
|
||||||
|
}
|
||||||
|
m.Digest = github_com_opencontainers_go_digest.Digest(dAtA[iNdEx:postIndex])
|
||||||
|
iNdEx = postIndex
|
||||||
|
default:
|
||||||
|
iNdEx = preIndex
|
||||||
|
skippy, err := skipContent(dAtA[iNdEx:])
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
if skippy < 0 {
|
||||||
|
return ErrInvalidLengthContent
|
||||||
|
}
|
||||||
|
if (iNdEx + skippy) > l {
|
||||||
|
return io.ErrUnexpectedEOF
|
||||||
|
}
|
||||||
|
iNdEx += skippy
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if iNdEx > l {
|
||||||
|
return io.ErrUnexpectedEOF
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
func skipContent(dAtA []byte) (n int, err error) {
|
||||||
|
l := len(dAtA)
|
||||||
|
iNdEx := 0
|
||||||
|
for iNdEx < l {
|
||||||
|
var wire uint64
|
||||||
|
for shift := uint(0); ; shift += 7 {
|
||||||
|
if shift >= 64 {
|
||||||
|
return 0, ErrIntOverflowContent
|
||||||
|
}
|
||||||
|
if iNdEx >= l {
|
||||||
|
return 0, io.ErrUnexpectedEOF
|
||||||
|
}
|
||||||
|
b := dAtA[iNdEx]
|
||||||
|
iNdEx++
|
||||||
|
wire |= (uint64(b) & 0x7F) << shift
|
||||||
|
if b < 0x80 {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
wireType := int(wire & 0x7)
|
||||||
|
switch wireType {
|
||||||
|
case 0:
|
||||||
|
for shift := uint(0); ; shift += 7 {
|
||||||
|
if shift >= 64 {
|
||||||
|
return 0, ErrIntOverflowContent
|
||||||
|
}
|
||||||
|
if iNdEx >= l {
|
||||||
|
return 0, io.ErrUnexpectedEOF
|
||||||
|
}
|
||||||
|
iNdEx++
|
||||||
|
if dAtA[iNdEx-1] < 0x80 {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return iNdEx, nil
|
||||||
|
case 1:
|
||||||
|
iNdEx += 8
|
||||||
|
return iNdEx, nil
|
||||||
|
case 2:
|
||||||
|
var length int
|
||||||
|
for shift := uint(0); ; shift += 7 {
|
||||||
|
if shift >= 64 {
|
||||||
|
return 0, ErrIntOverflowContent
|
||||||
|
}
|
||||||
|
if iNdEx >= l {
|
||||||
|
return 0, io.ErrUnexpectedEOF
|
||||||
|
}
|
||||||
|
b := dAtA[iNdEx]
|
||||||
|
iNdEx++
|
||||||
|
length |= (int(b) & 0x7F) << shift
|
||||||
|
if b < 0x80 {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
iNdEx += length
|
||||||
|
if length < 0 {
|
||||||
|
return 0, ErrInvalidLengthContent
|
||||||
|
}
|
||||||
|
return iNdEx, nil
|
||||||
|
case 3:
|
||||||
|
for {
|
||||||
|
var innerWire uint64
|
||||||
|
var start int = iNdEx
|
||||||
|
for shift := uint(0); ; shift += 7 {
|
||||||
|
if shift >= 64 {
|
||||||
|
return 0, ErrIntOverflowContent
|
||||||
|
}
|
||||||
|
if iNdEx >= l {
|
||||||
|
return 0, io.ErrUnexpectedEOF
|
||||||
|
}
|
||||||
|
b := dAtA[iNdEx]
|
||||||
|
iNdEx++
|
||||||
|
innerWire |= (uint64(b) & 0x7F) << shift
|
||||||
|
if b < 0x80 {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
innerWireType := int(innerWire & 0x7)
|
||||||
|
if innerWireType == 4 {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
next, err := skipContent(dAtA[start:])
|
||||||
|
if err != nil {
|
||||||
|
return 0, err
|
||||||
|
}
|
||||||
|
iNdEx = start + next
|
||||||
|
}
|
||||||
|
return iNdEx, nil
|
||||||
|
case 4:
|
||||||
|
return iNdEx, nil
|
||||||
|
case 5:
|
||||||
|
iNdEx += 4
|
||||||
|
return iNdEx, nil
|
||||||
|
default:
|
||||||
|
return 0, fmt.Errorf("proto: illegal wireType %d", wireType)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
panic("unreachable")
|
||||||
|
}
|
||||||
|
|
||||||
|
var (
|
||||||
|
ErrInvalidLengthContent = fmt.Errorf("proto: negative length found during unmarshaling")
|
||||||
|
ErrIntOverflowContent = fmt.Errorf("proto: integer overflow")
|
||||||
|
)
|
||||||
|
|
||||||
|
func init() {
|
||||||
|
proto.RegisterFile("github.com/containerd/containerd/api/services/events/v1/content.proto", fileDescriptorContent)
|
||||||
|
}
|
||||||
|
|
||||||
|
var fileDescriptorContent = []byte{
|
||||||
|
// 210 bytes of a gzipped FileDescriptorProto
|
||||||
|
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x72, 0x4d, 0xcf, 0x2c, 0xc9,
|
||||||
|
0x28, 0x4d, 0xd2, 0x4b, 0xce, 0xcf, 0xd5, 0x4f, 0xce, 0xcf, 0x2b, 0x49, 0xcc, 0xcc, 0x4b, 0x2d,
|
||||||
|
0x4a, 0x41, 0x66, 0x26, 0x16, 0x64, 0xea, 0x17, 0xa7, 0x16, 0x95, 0x65, 0x26, 0xa7, 0x16, 0xeb,
|
||||||
|
0xa7, 0x96, 0xa5, 0xe6, 0x95, 0x14, 0xeb, 0x97, 0x19, 0x82, 0x55, 0xa4, 0xe6, 0x95, 0xe8, 0x15,
|
||||||
|
0x14, 0xe5, 0x97, 0xe4, 0x0b, 0xc9, 0x22, 0x34, 0xe8, 0xc1, 0x14, 0xeb, 0x41, 0x14, 0xeb, 0x95,
|
||||||
|
0x19, 0x4a, 0x89, 0xa4, 0xe7, 0xa7, 0xe7, 0x83, 0x55, 0xea, 0x83, 0x58, 0x10, 0x4d, 0x4a, 0xd1,
|
||||||
|
0x5c, 0xbc, 0xce, 0x10, 0x53, 0x5c, 0x52, 0x73, 0x52, 0x4b, 0x52, 0x85, 0xbc, 0xb8, 0xd8, 0x52,
|
||||||
|
0x32, 0xd3, 0x53, 0x8b, 0x4b, 0x24, 0x18, 0x15, 0x18, 0x35, 0x38, 0x9d, 0x8c, 0x4e, 0xdc, 0x93,
|
||||||
|
0x67, 0xb8, 0x75, 0x4f, 0x5e, 0x0b, 0xc9, 0x91, 0xf9, 0x05, 0xa9, 0x79, 0x70, 0xcb, 0x8a, 0xf5,
|
||||||
|
0xd3, 0xf3, 0x75, 0x21, 0x5a, 0xf4, 0x5c, 0xc0, 0x54, 0x10, 0xd4, 0x04, 0xa7, 0x88, 0x13, 0x0f,
|
||||||
|
0xe5, 0x18, 0x6e, 0x3c, 0x94, 0x63, 0x68, 0x78, 0x24, 0xc7, 0x78, 0xe2, 0x91, 0x1c, 0xe3, 0x85,
|
||||||
|
0x47, 0x72, 0x8c, 0x0f, 0x1e, 0xc9, 0x31, 0x46, 0xd9, 0x91, 0xe9, 0x65, 0x6b, 0x08, 0x2b, 0x89,
|
||||||
|
0x0d, 0xec, 0x7a, 0x63, 0x40, 0x00, 0x00, 0x00, 0xff, 0xff, 0x6a, 0x41, 0x46, 0x06, 0x3b, 0x01,
|
||||||
|
0x00, 0x00,
|
||||||
|
}
|
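The encodeVarintContent and sovContent helpers in the generated file above implement standard protobuf base-128 varints: seven payload bits per byte, with the high bit marking continuation. A tiny standalone illustration of the same scheme, independent of the generated code:

```go
package main

import "fmt"

// putUvarint mirrors encodeVarintContent: low 7 bits per byte,
// high bit set while more bytes follow.
func putUvarint(buf []byte, v uint64) int {
	i := 0
	for v >= 1<<7 {
		buf[i] = byte(v&0x7f | 0x80)
		v >>= 7
		i++
	}
	buf[i] = byte(v)
	return i + 1
}

// getUvarint is the matching decoder, equivalent to the shift/OR loops
// in the generated Unmarshal functions.
func getUvarint(buf []byte) (uint64, int) {
	var v uint64
	for i, b := range buf {
		v |= uint64(b&0x7f) << (7 * uint(i))
		if b < 0x80 {
			return v, i + 1
		}
	}
	return 0, 0 // truncated input
}

func main() {
	buf := make([]byte, 10)
	n := putUvarint(buf, 300) // 300 encodes as 0xAC 0x02
	v, _ := getUvarint(buf[:n])
	fmt.Printf("% x -> %d\n", buf[:n], v)
}
```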
11  vendor/github.com/containerd/containerd/api/services/events/v1/content.proto (generated, vendored, new file)
@@ -0,0 +1,11 @@
+syntax = "proto3";
+
+package containerd.services.events.v1;
+
+import "gogoproto/gogo.proto";
+
+option go_package = "github.com/containerd/containerd/api/services/events/v1;events";
+
+message ContentDelete {
+	string digest = 1 [(gogoproto.customtype) = "github.com/opencontainers/go-digest.Digest", (gogoproto.nullable) = false];
+}
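Because of the gogo customtype and nullable=false options, the generated ContentDelete struct (visible earlier in this diff) carries an opencontainers go-digest Digest value rather than a plain string. A small sketch, assuming the `events` import alias and the go-digest package vendored elsewhere in this commit:

```go
package main

import (
	"fmt"

	events "github.com/containerd/containerd/api/services/events/v1"
	"github.com/gogo/protobuf/proto"
	digest "github.com/opencontainers/go-digest"
)

func main() {
	// Digest is a value of type digest.Digest, not a *string, because of the
	// customtype and nullable=false annotations on the proto field.
	ev := events.ContentDelete{Digest: digest.FromString("some blob content")}

	data, err := proto.Marshal(&ev)
	if err != nil {
		panic(err)
	}

	var out events.ContentDelete
	if err := proto.Unmarshal(data, &out); err != nil {
		panic(err)
	}
	fmt.Println("deleted digest:", out.Digest)
}
```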
903  vendor/github.com/containerd/containerd/api/services/events/v1/events.pb.go (generated, vendored, new file)
@@ -0,0 +1,903 @@
// Code generated by protoc-gen-gogo.
|
||||||
|
// source: github.com/containerd/containerd/api/services/events/v1/events.proto
|
||||||
|
// DO NOT EDIT!
|
||||||
|
|
||||||
|
package events
|
||||||
|
|
||||||
|
import proto "github.com/gogo/protobuf/proto"
|
||||||
|
import fmt "fmt"
|
||||||
|
import math "math"
|
||||||
|
import _ "github.com/gogo/protobuf/gogoproto"
|
||||||
|
import google_protobuf1 "github.com/gogo/protobuf/types"
|
||||||
|
import google_protobuf2 "github.com/golang/protobuf/ptypes/empty"
|
||||||
|
import _ "github.com/gogo/protobuf/types"
|
||||||
|
|
||||||
|
import time "time"
|
||||||
|
|
||||||
|
import (
|
||||||
|
context "golang.org/x/net/context"
|
||||||
|
grpc "google.golang.org/grpc"
|
||||||
|
)
|
||||||
|
|
||||||
|
import github_com_gogo_protobuf_types "github.com/gogo/protobuf/types"
|
||||||
|
|
||||||
|
import strings "strings"
|
||||||
|
import reflect "reflect"
|
||||||
|
|
||||||
|
import io "io"
|
||||||
|
|
||||||
|
// Reference imports to suppress errors if they are not otherwise used.
|
||||||
|
var _ = proto.Marshal
|
||||||
|
var _ = fmt.Errorf
|
||||||
|
var _ = math.Inf
|
||||||
|
var _ = time.Kitchen
|
||||||
|
|
||||||
|
type SubscribeRequest struct {
|
||||||
|
Filters []string `protobuf:"bytes,1,rep,name=filters" json:"filters,omitempty"`
|
||||||
|
}
|
||||||
|
|
||||||
|
func (m *SubscribeRequest) Reset() { *m = SubscribeRequest{} }
|
||||||
|
func (*SubscribeRequest) ProtoMessage() {}
|
||||||
|
func (*SubscribeRequest) Descriptor() ([]byte, []int) { return fileDescriptorEvents, []int{0} }
|
||||||
|
|
||||||
|
type PublishRequest struct {
|
||||||
|
Envelope *Envelope `protobuf:"bytes,1,opt,name=envelope" json:"envelope,omitempty"`
|
||||||
|
}
|
||||||
|
|
||||||
|
func (m *PublishRequest) Reset() { *m = PublishRequest{} }
|
||||||
|
func (*PublishRequest) ProtoMessage() {}
|
||||||
|
func (*PublishRequest) Descriptor() ([]byte, []int) { return fileDescriptorEvents, []int{1} }
|
||||||
|
|
||||||
|
type Envelope struct {
|
||||||
|
Timestamp time.Time `protobuf:"bytes,1,opt,name=timestamp,stdtime" json:"timestamp"`
|
||||||
|
Namespace string `protobuf:"bytes,2,opt,name=namespace,proto3" json:"namespace,omitempty"`
|
||||||
|
Topic string `protobuf:"bytes,3,opt,name=topic,proto3" json:"topic,omitempty"`
|
||||||
|
Event *google_protobuf1.Any `protobuf:"bytes,4,opt,name=event" json:"event,omitempty"`
|
||||||
|
}
|
||||||
|
|
||||||
|
func (m *Envelope) Reset() { *m = Envelope{} }
|
||||||
|
func (*Envelope) ProtoMessage() {}
|
||||||
|
func (*Envelope) Descriptor() ([]byte, []int) { return fileDescriptorEvents, []int{2} }
|
||||||
|
|
||||||
|
func init() {
|
||||||
|
proto.RegisterType((*SubscribeRequest)(nil), "containerd.services.events.v1.SubscribeRequest")
|
||||||
|
proto.RegisterType((*PublishRequest)(nil), "containerd.services.events.v1.PublishRequest")
|
||||||
|
proto.RegisterType((*Envelope)(nil), "containerd.services.events.v1.Envelope")
|
||||||
|
}
|
||||||
|
|
||||||
|
// Reference imports to suppress errors if they are not otherwise used.
|
||||||
|
var _ context.Context
|
||||||
|
var _ grpc.ClientConn
|
||||||
|
|
||||||
|
// This is a compile-time assertion to ensure that this generated file
|
||||||
|
// is compatible with the grpc package it is being compiled against.
|
||||||
|
const _ = grpc.SupportPackageIsVersion4
|
||||||
|
|
||||||
|
// Client API for Events service
|
||||||
|
|
||||||
|
type EventsClient interface {
|
||||||
|
Publish(ctx context.Context, in *PublishRequest, opts ...grpc.CallOption) (*google_protobuf2.Empty, error)
|
||||||
|
Subscribe(ctx context.Context, in *SubscribeRequest, opts ...grpc.CallOption) (Events_SubscribeClient, error)
|
||||||
|
}
|
||||||
|
|
||||||
|
type eventsClient struct {
|
||||||
|
cc *grpc.ClientConn
|
||||||
|
}
|
||||||
|
|
||||||
|
func NewEventsClient(cc *grpc.ClientConn) EventsClient {
|
||||||
|
return &eventsClient{cc}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *eventsClient) Publish(ctx context.Context, in *PublishRequest, opts ...grpc.CallOption) (*google_protobuf2.Empty, error) {
|
||||||
|
out := new(google_protobuf2.Empty)
|
||||||
|
err := grpc.Invoke(ctx, "/containerd.services.events.v1.Events/Publish", in, out, c.cc, opts...)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
return out, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *eventsClient) Subscribe(ctx context.Context, in *SubscribeRequest, opts ...grpc.CallOption) (Events_SubscribeClient, error) {
|
||||||
|
stream, err := grpc.NewClientStream(ctx, &_Events_serviceDesc.Streams[0], c.cc, "/containerd.services.events.v1.Events/Subscribe", opts...)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
x := &eventsSubscribeClient{stream}
|
||||||
|
if err := x.ClientStream.SendMsg(in); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
if err := x.ClientStream.CloseSend(); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
return x, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
type Events_SubscribeClient interface {
|
||||||
|
Recv() (*Envelope, error)
|
||||||
|
grpc.ClientStream
|
||||||
|
}
|
||||||
|
|
||||||
|
type eventsSubscribeClient struct {
|
||||||
|
grpc.ClientStream
|
||||||
|
}
|
||||||
|
|
||||||
|
func (x *eventsSubscribeClient) Recv() (*Envelope, error) {
|
||||||
|
m := new(Envelope)
|
||||||
|
if err := x.ClientStream.RecvMsg(m); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
return m, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Server API for Events service
|
||||||
|
|
||||||
|
type EventsServer interface {
|
||||||
|
Publish(context.Context, *PublishRequest) (*google_protobuf2.Empty, error)
|
||||||
|
Subscribe(*SubscribeRequest, Events_SubscribeServer) error
|
||||||
|
}
|
||||||
|
|
||||||
|
func RegisterEventsServer(s *grpc.Server, srv EventsServer) {
|
||||||
|
s.RegisterService(&_Events_serviceDesc, srv)
|
||||||
|
}
|
||||||
|
|
||||||
|
func _Events_Publish_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
|
||||||
|
in := new(PublishRequest)
|
||||||
|
if err := dec(in); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
if interceptor == nil {
|
||||||
|
return srv.(EventsServer).Publish(ctx, in)
|
||||||
|
}
|
||||||
|
info := &grpc.UnaryServerInfo{
|
||||||
|
Server: srv,
|
||||||
|
FullMethod: "/containerd.services.events.v1.Events/Publish",
|
||||||
|
}
|
||||||
|
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
|
||||||
|
return srv.(EventsServer).Publish(ctx, req.(*PublishRequest))
|
||||||
|
}
|
||||||
|
return interceptor(ctx, in, info, handler)
|
||||||
|
}
|
||||||
|
|
||||||
|
func _Events_Subscribe_Handler(srv interface{}, stream grpc.ServerStream) error {
|
||||||
|
m := new(SubscribeRequest)
|
||||||
|
if err := stream.RecvMsg(m); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
return srv.(EventsServer).Subscribe(m, &eventsSubscribeServer{stream})
|
||||||
|
}
|
||||||
|
|
||||||
|
type Events_SubscribeServer interface {
|
||||||
|
Send(*Envelope) error
|
||||||
|
grpc.ServerStream
|
||||||
|
}
|
||||||
|
|
||||||
|
type eventsSubscribeServer struct {
|
||||||
|
grpc.ServerStream
|
||||||
|
}
|
||||||
|
|
||||||
|
func (x *eventsSubscribeServer) Send(m *Envelope) error {
|
||||||
|
return x.ServerStream.SendMsg(m)
|
||||||
|
}
|
||||||
|
|
||||||
|
var _Events_serviceDesc = grpc.ServiceDesc{
|
||||||
|
ServiceName: "containerd.services.events.v1.Events",
|
||||||
|
HandlerType: (*EventsServer)(nil),
|
||||||
|
Methods: []grpc.MethodDesc{
|
||||||
|
{
|
||||||
|
MethodName: "Publish",
|
||||||
|
Handler: _Events_Publish_Handler,
|
||||||
|
},
|
||||||
|
},
|
||||||
|
Streams: []grpc.StreamDesc{
|
||||||
|
{
|
||||||
|
StreamName: "Subscribe",
|
||||||
|
Handler: _Events_Subscribe_Handler,
|
||||||
|
ServerStreams: true,
|
||||||
|
},
|
||||||
|
},
|
||||||
|
Metadata: "github.com/containerd/containerd/api/services/events/v1/events.proto",
|
||||||
|
}
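The service descriptor above is what RegisterEventsServer wires a concrete implementation into. A minimal sketch of the server side follows, assuming a toy in-process implementation; the listener address, topic string, and error handling are illustrative only.

```go
package main

import (
	"log"
	"net"

	events "github.com/containerd/containerd/api/services/events/v1"
	"github.com/golang/protobuf/ptypes/empty"
	"golang.org/x/net/context"
	"google.golang.org/grpc"
)

// service satisfies events.EventsServer (Publish + Subscribe).
type service struct{}

func (s *service) Publish(ctx context.Context, req *events.PublishRequest) (*empty.Empty, error) {
	if req.Envelope != nil {
		log.Printf("published to %s", req.Envelope.Topic)
	}
	return &empty.Empty{}, nil
}

func (s *service) Subscribe(req *events.SubscribeRequest, stream events.Events_SubscribeServer) error {
	// A real implementation would fan out matching envelopes; here we send one and return.
	return stream.Send(&events.Envelope{Topic: "/example/topic"})
}

func main() {
	l, err := net.Listen("tcp", "127.0.0.1:0") // listener address is illustrative
	if err != nil {
		log.Fatal(err)
	}
	srv := grpc.NewServer()
	events.RegisterEventsServer(srv, &service{})
	log.Fatal(srv.Serve(l))
}
```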
|
||||||
|
|
||||||
|
func (m *SubscribeRequest) Marshal() (dAtA []byte, err error) {
|
||||||
|
size := m.Size()
|
||||||
|
dAtA = make([]byte, size)
|
||||||
|
n, err := m.MarshalTo(dAtA)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
return dAtA[:n], nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (m *SubscribeRequest) MarshalTo(dAtA []byte) (int, error) {
|
||||||
|
var i int
|
||||||
|
_ = i
|
||||||
|
var l int
|
||||||
|
_ = l
|
||||||
|
if len(m.Filters) > 0 {
|
||||||
|
for _, s := range m.Filters {
|
||||||
|
dAtA[i] = 0xa
|
||||||
|
i++
|
||||||
|
l = len(s)
|
||||||
|
for l >= 1<<7 {
|
||||||
|
dAtA[i] = uint8(uint64(l)&0x7f | 0x80)
|
||||||
|
l >>= 7
|
||||||
|
i++
|
||||||
|
}
|
||||||
|
dAtA[i] = uint8(l)
|
||||||
|
i++
|
||||||
|
i += copy(dAtA[i:], s)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return i, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (m *PublishRequest) Marshal() (dAtA []byte, err error) {
|
||||||
|
size := m.Size()
|
||||||
|
dAtA = make([]byte, size)
|
||||||
|
n, err := m.MarshalTo(dAtA)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
return dAtA[:n], nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (m *PublishRequest) MarshalTo(dAtA []byte) (int, error) {
|
||||||
|
var i int
|
||||||
|
_ = i
|
||||||
|
var l int
|
||||||
|
_ = l
|
||||||
|
if m.Envelope != nil {
|
||||||
|
dAtA[i] = 0xa
|
||||||
|
i++
|
||||||
|
i = encodeVarintEvents(dAtA, i, uint64(m.Envelope.Size()))
|
||||||
|
n1, err := m.Envelope.MarshalTo(dAtA[i:])
|
||||||
|
if err != nil {
|
||||||
|
return 0, err
|
||||||
|
}
|
||||||
|
i += n1
|
||||||
|
}
|
||||||
|
return i, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (m *Envelope) Marshal() (dAtA []byte, err error) {
|
||||||
|
size := m.Size()
|
||||||
|
dAtA = make([]byte, size)
|
||||||
|
n, err := m.MarshalTo(dAtA)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
return dAtA[:n], nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (m *Envelope) MarshalTo(dAtA []byte) (int, error) {
|
||||||
|
var i int
|
||||||
|
_ = i
|
||||||
|
var l int
|
||||||
|
_ = l
|
||||||
|
dAtA[i] = 0xa
|
||||||
|
i++
|
||||||
|
i = encodeVarintEvents(dAtA, i, uint64(github_com_gogo_protobuf_types.SizeOfStdTime(m.Timestamp)))
|
||||||
|
n2, err := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.Timestamp, dAtA[i:])
|
||||||
|
if err != nil {
|
||||||
|
return 0, err
|
||||||
|
}
|
||||||
|
i += n2
|
||||||
|
if len(m.Namespace) > 0 {
|
||||||
|
dAtA[i] = 0x12
|
||||||
|
i++
|
||||||
|
i = encodeVarintEvents(dAtA, i, uint64(len(m.Namespace)))
|
||||||
|
i += copy(dAtA[i:], m.Namespace)
|
||||||
|
}
|
||||||
|
if len(m.Topic) > 0 {
|
||||||
|
dAtA[i] = 0x1a
|
||||||
|
i++
|
||||||
|
i = encodeVarintEvents(dAtA, i, uint64(len(m.Topic)))
|
||||||
|
i += copy(dAtA[i:], m.Topic)
|
||||||
|
}
|
||||||
|
if m.Event != nil {
|
||||||
|
dAtA[i] = 0x22
|
||||||
|
i++
|
||||||
|
i = encodeVarintEvents(dAtA, i, uint64(m.Event.Size()))
|
||||||
|
n3, err := m.Event.MarshalTo(dAtA[i:])
|
||||||
|
if err != nil {
|
||||||
|
return 0, err
|
||||||
|
}
|
||||||
|
i += n3
|
||||||
|
}
|
||||||
|
return i, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func encodeFixed64Events(dAtA []byte, offset int, v uint64) int {
|
||||||
|
dAtA[offset] = uint8(v)
|
||||||
|
dAtA[offset+1] = uint8(v >> 8)
|
||||||
|
dAtA[offset+2] = uint8(v >> 16)
|
||||||
|
dAtA[offset+3] = uint8(v >> 24)
|
||||||
|
dAtA[offset+4] = uint8(v >> 32)
|
||||||
|
dAtA[offset+5] = uint8(v >> 40)
|
||||||
|
dAtA[offset+6] = uint8(v >> 48)
|
||||||
|
dAtA[offset+7] = uint8(v >> 56)
|
||||||
|
return offset + 8
|
||||||
|
}
|
||||||
|
func encodeFixed32Events(dAtA []byte, offset int, v uint32) int {
|
||||||
|
dAtA[offset] = uint8(v)
|
||||||
|
dAtA[offset+1] = uint8(v >> 8)
|
||||||
|
dAtA[offset+2] = uint8(v >> 16)
|
||||||
|
dAtA[offset+3] = uint8(v >> 24)
|
||||||
|
return offset + 4
|
||||||
|
}
|
||||||
|
func encodeVarintEvents(dAtA []byte, offset int, v uint64) int {
|
||||||
|
for v >= 1<<7 {
|
||||||
|
dAtA[offset] = uint8(v&0x7f | 0x80)
|
||||||
|
v >>= 7
|
||||||
|
offset++
|
||||||
|
}
|
||||||
|
dAtA[offset] = uint8(v)
|
||||||
|
return offset + 1
|
||||||
|
}
|
||||||
|
func (m *SubscribeRequest) Size() (n int) {
|
||||||
|
var l int
|
||||||
|
_ = l
|
||||||
|
if len(m.Filters) > 0 {
|
||||||
|
for _, s := range m.Filters {
|
||||||
|
l = len(s)
|
||||||
|
n += 1 + l + sovEvents(uint64(l))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return n
|
||||||
|
}
|
||||||
|
|
||||||
|
func (m *PublishRequest) Size() (n int) {
|
||||||
|
var l int
|
||||||
|
_ = l
|
||||||
|
if m.Envelope != nil {
|
||||||
|
l = m.Envelope.Size()
|
||||||
|
n += 1 + l + sovEvents(uint64(l))
|
||||||
|
}
|
||||||
|
return n
|
||||||
|
}
|
||||||
|
|
||||||
|
func (m *Envelope) Size() (n int) {
|
||||||
|
var l int
|
||||||
|
_ = l
|
||||||
|
l = github_com_gogo_protobuf_types.SizeOfStdTime(m.Timestamp)
|
||||||
|
n += 1 + l + sovEvents(uint64(l))
|
||||||
|
l = len(m.Namespace)
|
||||||
|
if l > 0 {
|
||||||
|
n += 1 + l + sovEvents(uint64(l))
|
||||||
|
}
|
||||||
|
l = len(m.Topic)
|
||||||
|
if l > 0 {
|
||||||
|
n += 1 + l + sovEvents(uint64(l))
|
||||||
|
}
|
||||||
|
if m.Event != nil {
|
||||||
|
l = m.Event.Size()
|
||||||
|
n += 1 + l + sovEvents(uint64(l))
|
||||||
|
}
|
||||||
|
return n
|
||||||
|
}
|
||||||
|
|
||||||
|
func sovEvents(x uint64) (n int) {
|
||||||
|
for {
|
||||||
|
n++
|
||||||
|
x >>= 7
|
||||||
|
if x == 0 {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return n
|
||||||
|
}
|
||||||
|
func sozEvents(x uint64) (n int) {
|
||||||
|
return sovEvents(uint64((x << 1) ^ uint64((int64(x) >> 63))))
|
||||||
|
}
|
||||||
|
func (this *SubscribeRequest) String() string {
|
||||||
|
if this == nil {
|
||||||
|
return "nil"
|
||||||
|
}
|
||||||
|
s := strings.Join([]string{`&SubscribeRequest{`,
|
||||||
|
`Filters:` + fmt.Sprintf("%v", this.Filters) + `,`,
|
||||||
|
`}`,
|
||||||
|
}, "")
|
||||||
|
return s
|
||||||
|
}
|
||||||
|
func (this *PublishRequest) String() string {
|
||||||
|
if this == nil {
|
||||||
|
return "nil"
|
||||||
|
}
|
||||||
|
s := strings.Join([]string{`&PublishRequest{`,
|
||||||
|
`Envelope:` + strings.Replace(fmt.Sprintf("%v", this.Envelope), "Envelope", "Envelope", 1) + `,`,
|
||||||
|
`}`,
|
||||||
|
}, "")
|
||||||
|
return s
|
||||||
|
}
|
||||||
|
func (this *Envelope) String() string {
|
||||||
|
if this == nil {
|
||||||
|
return "nil"
|
||||||
|
}
|
||||||
|
s := strings.Join([]string{`&Envelope{`,
|
||||||
|
`Timestamp:` + strings.Replace(strings.Replace(this.Timestamp.String(), "Timestamp", "google_protobuf3.Timestamp", 1), `&`, ``, 1) + `,`,
|
||||||
|
`Namespace:` + fmt.Sprintf("%v", this.Namespace) + `,`,
|
||||||
|
`Topic:` + fmt.Sprintf("%v", this.Topic) + `,`,
|
||||||
|
`Event:` + strings.Replace(fmt.Sprintf("%v", this.Event), "Any", "google_protobuf1.Any", 1) + `,`,
|
||||||
|
`}`,
|
||||||
|
}, "")
|
||||||
|
return s
|
||||||
|
}
|
||||||
|
func valueToStringEvents(v interface{}) string {
|
||||||
|
rv := reflect.ValueOf(v)
|
||||||
|
if rv.IsNil() {
|
||||||
|
return "nil"
|
||||||
|
}
|
||||||
|
pv := reflect.Indirect(rv).Interface()
|
||||||
|
return fmt.Sprintf("*%v", pv)
|
||||||
|
}
|
||||||
|
func (m *SubscribeRequest) Unmarshal(dAtA []byte) error {
|
||||||
|
l := len(dAtA)
|
||||||
|
iNdEx := 0
|
||||||
|
for iNdEx < l {
|
||||||
|
preIndex := iNdEx
|
||||||
|
var wire uint64
|
||||||
|
for shift := uint(0); ; shift += 7 {
|
||||||
|
if shift >= 64 {
|
||||||
|
return ErrIntOverflowEvents
|
||||||
|
}
|
||||||
|
if iNdEx >= l {
|
||||||
|
return io.ErrUnexpectedEOF
|
||||||
|
}
|
||||||
|
b := dAtA[iNdEx]
|
||||||
|
iNdEx++
|
||||||
|
wire |= (uint64(b) & 0x7F) << shift
|
||||||
|
if b < 0x80 {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
fieldNum := int32(wire >> 3)
|
||||||
|
wireType := int(wire & 0x7)
|
||||||
|
if wireType == 4 {
|
||||||
|
return fmt.Errorf("proto: SubscribeRequest: wiretype end group for non-group")
|
||||||
|
}
|
||||||
|
if fieldNum <= 0 {
|
||||||
|
return fmt.Errorf("proto: SubscribeRequest: illegal tag %d (wire type %d)", fieldNum, wire)
|
||||||
|
}
|
||||||
|
switch fieldNum {
|
||||||
|
case 1:
|
||||||
|
if wireType != 2 {
|
||||||
|
return fmt.Errorf("proto: wrong wireType = %d for field Filters", wireType)
|
||||||
|
}
|
||||||
|
var stringLen uint64
|
||||||
|
for shift := uint(0); ; shift += 7 {
|
||||||
|
if shift >= 64 {
|
||||||
|
return ErrIntOverflowEvents
|
||||||
|
}
|
||||||
|
if iNdEx >= l {
|
||||||
|
return io.ErrUnexpectedEOF
|
||||||
|
}
|
||||||
|
b := dAtA[iNdEx]
|
||||||
|
iNdEx++
|
||||||
|
stringLen |= (uint64(b) & 0x7F) << shift
|
||||||
|
if b < 0x80 {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
intStringLen := int(stringLen)
|
||||||
|
if intStringLen < 0 {
|
||||||
|
return ErrInvalidLengthEvents
|
||||||
|
}
|
||||||
|
postIndex := iNdEx + intStringLen
|
||||||
|
if postIndex > l {
|
||||||
|
return io.ErrUnexpectedEOF
|
||||||
|
}
|
||||||
|
m.Filters = append(m.Filters, string(dAtA[iNdEx:postIndex]))
|
||||||
|
iNdEx = postIndex
|
||||||
|
default:
|
||||||
|
iNdEx = preIndex
|
||||||
|
skippy, err := skipEvents(dAtA[iNdEx:])
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
if skippy < 0 {
|
||||||
|
return ErrInvalidLengthEvents
|
||||||
|
}
|
||||||
|
if (iNdEx + skippy) > l {
|
||||||
|
return io.ErrUnexpectedEOF
|
||||||
|
}
|
||||||
|
iNdEx += skippy
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if iNdEx > l {
|
||||||
|
return io.ErrUnexpectedEOF
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
func (m *PublishRequest) Unmarshal(dAtA []byte) error {
|
||||||
|
l := len(dAtA)
|
||||||
|
iNdEx := 0
|
||||||
|
for iNdEx < l {
|
||||||
|
preIndex := iNdEx
|
||||||
|
var wire uint64
|
||||||
|
for shift := uint(0); ; shift += 7 {
|
||||||
|
if shift >= 64 {
|
||||||
|
return ErrIntOverflowEvents
|
||||||
|
}
|
||||||
|
if iNdEx >= l {
|
||||||
|
return io.ErrUnexpectedEOF
|
||||||
|
}
|
||||||
|
b := dAtA[iNdEx]
|
||||||
|
iNdEx++
|
||||||
|
wire |= (uint64(b) & 0x7F) << shift
|
||||||
|
if b < 0x80 {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
fieldNum := int32(wire >> 3)
|
||||||
|
wireType := int(wire & 0x7)
|
||||||
|
if wireType == 4 {
|
||||||
|
return fmt.Errorf("proto: PublishRequest: wiretype end group for non-group")
|
||||||
|
}
|
||||||
|
if fieldNum <= 0 {
|
||||||
|
return fmt.Errorf("proto: PublishRequest: illegal tag %d (wire type %d)", fieldNum, wire)
|
||||||
|
}
|
||||||
|
switch fieldNum {
|
||||||
|
case 1:
|
||||||
|
if wireType != 2 {
|
||||||
|
return fmt.Errorf("proto: wrong wireType = %d for field Envelope", wireType)
|
||||||
|
}
|
||||||
|
var msglen int
|
||||||
|
for shift := uint(0); ; shift += 7 {
|
||||||
|
if shift >= 64 {
|
||||||
|
return ErrIntOverflowEvents
|
||||||
|
}
|
||||||
|
if iNdEx >= l {
|
||||||
|
return io.ErrUnexpectedEOF
|
||||||
|
}
|
||||||
|
b := dAtA[iNdEx]
|
||||||
|
iNdEx++
|
||||||
|
msglen |= (int(b) & 0x7F) << shift
|
||||||
|
if b < 0x80 {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if msglen < 0 {
|
||||||
|
return ErrInvalidLengthEvents
|
||||||
|
}
|
||||||
|
postIndex := iNdEx + msglen
|
||||||
|
if postIndex > l {
|
||||||
|
return io.ErrUnexpectedEOF
|
||||||
|
}
|
||||||
|
if m.Envelope == nil {
|
||||||
|
m.Envelope = &Envelope{}
|
||||||
|
}
|
||||||
|
if err := m.Envelope.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
iNdEx = postIndex
|
||||||
|
default:
|
||||||
|
iNdEx = preIndex
|
||||||
|
skippy, err := skipEvents(dAtA[iNdEx:])
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
if skippy < 0 {
|
||||||
|
return ErrInvalidLengthEvents
|
||||||
|
}
|
||||||
|
if (iNdEx + skippy) > l {
|
||||||
|
return io.ErrUnexpectedEOF
|
||||||
|
}
|
||||||
|
iNdEx += skippy
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if iNdEx > l {
|
||||||
|
return io.ErrUnexpectedEOF
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
func (m *Envelope) Unmarshal(dAtA []byte) error {
|
||||||
|
l := len(dAtA)
|
||||||
|
iNdEx := 0
|
||||||
|
for iNdEx < l {
|
||||||
|
preIndex := iNdEx
|
||||||
|
var wire uint64
|
||||||
|
for shift := uint(0); ; shift += 7 {
|
||||||
|
if shift >= 64 {
|
||||||
|
return ErrIntOverflowEvents
|
||||||
|
}
|
||||||
|
if iNdEx >= l {
|
||||||
|
return io.ErrUnexpectedEOF
|
||||||
|
}
|
||||||
|
b := dAtA[iNdEx]
|
||||||
|
iNdEx++
|
||||||
|
wire |= (uint64(b) & 0x7F) << shift
|
||||||
|
if b < 0x80 {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
fieldNum := int32(wire >> 3)
|
||||||
|
wireType := int(wire & 0x7)
|
||||||
|
if wireType == 4 {
|
||||||
|
return fmt.Errorf("proto: Envelope: wiretype end group for non-group")
|
||||||
|
}
|
||||||
|
if fieldNum <= 0 {
|
||||||
|
return fmt.Errorf("proto: Envelope: illegal tag %d (wire type %d)", fieldNum, wire)
|
||||||
|
}
|
||||||
|
switch fieldNum {
|
||||||
|
case 1:
|
||||||
|
if wireType != 2 {
|
||||||
|
return fmt.Errorf("proto: wrong wireType = %d for field Timestamp", wireType)
|
||||||
|
}
|
||||||
|
var msglen int
|
||||||
|
for shift := uint(0); ; shift += 7 {
|
||||||
|
if shift >= 64 {
|
||||||
|
return ErrIntOverflowEvents
|
||||||
|
}
|
||||||
|
if iNdEx >= l {
|
||||||
|
return io.ErrUnexpectedEOF
|
||||||
|
}
|
||||||
|
b := dAtA[iNdEx]
|
||||||
|
iNdEx++
|
||||||
|
msglen |= (int(b) & 0x7F) << shift
|
||||||
|
if b < 0x80 {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if msglen < 0 {
|
||||||
|
return ErrInvalidLengthEvents
|
||||||
|
}
|
||||||
|
postIndex := iNdEx + msglen
|
||||||
|
if postIndex > l {
|
||||||
|
return io.ErrUnexpectedEOF
|
||||||
|
}
|
||||||
|
if err := github_com_gogo_protobuf_types.StdTimeUnmarshal(&m.Timestamp, dAtA[iNdEx:postIndex]); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
iNdEx = postIndex
|
||||||
|
case 2:
|
||||||
|
if wireType != 2 {
|
||||||
|
return fmt.Errorf("proto: wrong wireType = %d for field Namespace", wireType)
|
||||||
|
}
|
||||||
|
var stringLen uint64
|
||||||
|
for shift := uint(0); ; shift += 7 {
|
||||||
|
if shift >= 64 {
|
||||||
|
return ErrIntOverflowEvents
|
||||||
|
}
|
||||||
|
if iNdEx >= l {
|
||||||
|
return io.ErrUnexpectedEOF
|
||||||
|
}
|
||||||
|
b := dAtA[iNdEx]
|
||||||
|
iNdEx++
|
||||||
|
stringLen |= (uint64(b) & 0x7F) << shift
|
||||||
|
if b < 0x80 {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
intStringLen := int(stringLen)
|
||||||
|
if intStringLen < 0 {
|
||||||
|
return ErrInvalidLengthEvents
|
||||||
|
}
|
||||||
|
postIndex := iNdEx + intStringLen
|
||||||
|
if postIndex > l {
|
||||||
|
return io.ErrUnexpectedEOF
|
||||||
|
}
|
||||||
|
m.Namespace = string(dAtA[iNdEx:postIndex])
|
||||||
|
iNdEx = postIndex
|
||||||
|
case 3:
|
||||||
|
if wireType != 2 {
|
||||||
|
return fmt.Errorf("proto: wrong wireType = %d for field Topic", wireType)
|
||||||
|
}
|
||||||
|
var stringLen uint64
|
||||||
|
for shift := uint(0); ; shift += 7 {
|
||||||
|
if shift >= 64 {
|
||||||
|
return ErrIntOverflowEvents
|
||||||
|
}
|
||||||
|
if iNdEx >= l {
|
||||||
|
return io.ErrUnexpectedEOF
|
||||||
|
}
|
||||||
|
b := dAtA[iNdEx]
|
||||||
|
iNdEx++
|
||||||
|
stringLen |= (uint64(b) & 0x7F) << shift
|
||||||
|
if b < 0x80 {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
intStringLen := int(stringLen)
|
||||||
|
if intStringLen < 0 {
|
||||||
|
return ErrInvalidLengthEvents
|
||||||
|
}
|
||||||
|
postIndex := iNdEx + intStringLen
|
||||||
|
if postIndex > l {
|
||||||
|
return io.ErrUnexpectedEOF
|
||||||
|
}
|
||||||
|
m.Topic = string(dAtA[iNdEx:postIndex])
|
||||||
|
iNdEx = postIndex
|
||||||
|
case 4:
|
||||||
|
if wireType != 2 {
|
||||||
|
return fmt.Errorf("proto: wrong wireType = %d for field Event", wireType)
|
||||||
|
}
|
||||||
|
var msglen int
|
||||||
|
for shift := uint(0); ; shift += 7 {
|
||||||
|
if shift >= 64 {
|
||||||
|
return ErrIntOverflowEvents
|
||||||
|
}
|
||||||
|
if iNdEx >= l {
|
||||||
|
return io.ErrUnexpectedEOF
|
||||||
|
}
|
||||||
|
b := dAtA[iNdEx]
|
||||||
|
iNdEx++
|
||||||
|
msglen |= (int(b) & 0x7F) << shift
|
||||||
|
if b < 0x80 {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if msglen < 0 {
|
||||||
|
return ErrInvalidLengthEvents
|
||||||
|
}
|
||||||
|
postIndex := iNdEx + msglen
|
||||||
|
if postIndex > l {
|
||||||
|
return io.ErrUnexpectedEOF
|
||||||
|
}
|
||||||
|
if m.Event == nil {
|
||||||
|
m.Event = &google_protobuf1.Any{}
|
||||||
|
}
|
||||||
|
if err := m.Event.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
iNdEx = postIndex
|
||||||
|
default:
|
||||||
|
iNdEx = preIndex
|
||||||
|
skippy, err := skipEvents(dAtA[iNdEx:])
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
if skippy < 0 {
|
||||||
|
return ErrInvalidLengthEvents
|
||||||
|
}
|
||||||
|
if (iNdEx + skippy) > l {
|
||||||
|
return io.ErrUnexpectedEOF
|
||||||
|
}
|
||||||
|
iNdEx += skippy
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if iNdEx > l {
|
||||||
|
return io.ErrUnexpectedEOF
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
func skipEvents(dAtA []byte) (n int, err error) {
|
||||||
|
l := len(dAtA)
|
||||||
|
iNdEx := 0
|
||||||
|
for iNdEx < l {
|
||||||
|
var wire uint64
|
||||||
|
for shift := uint(0); ; shift += 7 {
|
||||||
|
if shift >= 64 {
|
||||||
|
return 0, ErrIntOverflowEvents
|
||||||
|
}
|
||||||
|
if iNdEx >= l {
|
||||||
|
return 0, io.ErrUnexpectedEOF
|
||||||
|
}
|
||||||
|
b := dAtA[iNdEx]
|
||||||
|
iNdEx++
|
||||||
|
wire |= (uint64(b) & 0x7F) << shift
|
||||||
|
if b < 0x80 {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
wireType := int(wire & 0x7)
|
||||||
|
switch wireType {
|
||||||
|
case 0:
|
||||||
|
for shift := uint(0); ; shift += 7 {
|
||||||
|
if shift >= 64 {
|
||||||
|
return 0, ErrIntOverflowEvents
|
||||||
|
}
|
||||||
|
if iNdEx >= l {
|
||||||
|
return 0, io.ErrUnexpectedEOF
|
||||||
|
}
|
||||||
|
iNdEx++
|
||||||
|
if dAtA[iNdEx-1] < 0x80 {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return iNdEx, nil
|
||||||
|
case 1:
|
||||||
|
iNdEx += 8
|
||||||
|
return iNdEx, nil
|
||||||
|
case 2:
|
||||||
|
var length int
|
||||||
|
for shift := uint(0); ; shift += 7 {
|
||||||
|
if shift >= 64 {
|
||||||
|
return 0, ErrIntOverflowEvents
|
||||||
|
}
|
||||||
|
if iNdEx >= l {
|
||||||
|
return 0, io.ErrUnexpectedEOF
|
||||||
|
}
|
||||||
|
b := dAtA[iNdEx]
|
||||||
|
iNdEx++
|
||||||
|
length |= (int(b) & 0x7F) << shift
|
||||||
|
if b < 0x80 {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
iNdEx += length
|
||||||
|
if length < 0 {
|
||||||
|
return 0, ErrInvalidLengthEvents
|
||||||
|
}
|
||||||
|
return iNdEx, nil
|
||||||
|
case 3:
|
||||||
|
for {
|
||||||
|
var innerWire uint64
|
||||||
|
var start int = iNdEx
|
||||||
|
for shift := uint(0); ; shift += 7 {
|
||||||
|
if shift >= 64 {
|
||||||
|
return 0, ErrIntOverflowEvents
|
||||||
|
}
|
||||||
|
if iNdEx >= l {
|
||||||
|
return 0, io.ErrUnexpectedEOF
|
||||||
|
}
|
||||||
|
b := dAtA[iNdEx]
|
||||||
|
iNdEx++
|
||||||
|
innerWire |= (uint64(b) & 0x7F) << shift
|
||||||
|
if b < 0x80 {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
innerWireType := int(innerWire & 0x7)
|
||||||
|
if innerWireType == 4 {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
next, err := skipEvents(dAtA[start:])
|
||||||
|
if err != nil {
|
||||||
|
return 0, err
|
||||||
|
}
|
||||||
|
iNdEx = start + next
|
||||||
|
}
|
||||||
|
return iNdEx, nil
|
||||||
|
case 4:
|
||||||
|
return iNdEx, nil
|
||||||
|
case 5:
|
||||||
|
iNdEx += 4
|
||||||
|
return iNdEx, nil
|
||||||
|
default:
|
||||||
|
return 0, fmt.Errorf("proto: illegal wireType %d", wireType)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
panic("unreachable")
|
||||||
|
}
|
||||||
|
|
||||||
|
var (
|
||||||
|
ErrInvalidLengthEvents = fmt.Errorf("proto: negative length found during unmarshaling")
|
||||||
|
ErrIntOverflowEvents = fmt.Errorf("proto: integer overflow")
|
||||||
|
)
|
||||||
|
|
||||||
|
func init() {
|
||||||
|
proto.RegisterFile("github.com/containerd/containerd/api/services/events/v1/events.proto", fileDescriptorEvents)
|
||||||
|
}
|
||||||
|
|
||||||
|
var fileDescriptorEvents = []byte{
|
||||||
|
// 407 bytes of a gzipped FileDescriptorProto
|
||||||
|
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x9c, 0x92, 0xcd, 0x6e, 0xd3, 0x40,
|
||||||
|
0x10, 0xc7, 0xb3, 0x84, 0x7c, 0x78, 0x91, 0x10, 0x5a, 0x45, 0xc8, 0x18, 0x70, 0xa2, 0x5c, 0x88,
|
||||||
|
0x10, 0xec, 0x92, 0x70, 0x44, 0x42, 0x22, 0x90, 0x7b, 0x64, 0x40, 0x42, 0xdc, 0x6c, 0x77, 0xe2,
|
||||||
|
0xac, 0x64, 0x7b, 0x5d, 0xef, 0xda, 0x52, 0x6e, 0x7d, 0x84, 0x3e, 0x49, 0x5f, 0xa2, 0x97, 0x1c,
|
||||||
|
0x7b, 0xec, 0xa9, 0x6d, 0xfc, 0x24, 0x55, 0xfc, 0x91, 0xb4, 0x89, 0xd4, 0x54, 0xbd, 0xcd, 0xec,
|
||||||
|
0xff, 0x37, 0x3b, 0xfb, 0x9f, 0x59, 0xfc, 0xcb, 0xe3, 0x6a, 0x9e, 0x38, 0xd4, 0x15, 0x01, 0x73,
|
||||||
|
0x45, 0xa8, 0x6c, 0x1e, 0x42, 0x7c, 0x74, 0x37, 0xb4, 0x23, 0xce, 0x24, 0xc4, 0x29, 0x77, 0x41,
|
||||||
|
0x32, 0x48, 0x21, 0x54, 0x92, 0xa5, 0xc3, 0x32, 0xa2, 0x51, 0x2c, 0x94, 0x20, 0xef, 0xb7, 0x3c,
|
||||||
|
0xad, 0x58, 0x5a, 0x12, 0xe9, 0xd0, 0xe8, 0x78, 0xc2, 0x13, 0x39, 0xc9, 0xd6, 0x51, 0x51, 0x64,
|
||||||
|
0xbc, 0xf1, 0x84, 0xf0, 0x7c, 0x60, 0x79, 0xe6, 0x24, 0x33, 0x66, 0x87, 0x8b, 0x52, 0x7a, 0xbb,
|
||||||
|
0x2b, 0x41, 0x10, 0xa9, 0x4a, 0xec, 0xee, 0x8a, 0x8a, 0x07, 0x20, 0x95, 0x1d, 0x44, 0x05, 0xd0,
|
||||||
|
0xff, 0x84, 0x5f, 0xfd, 0x4e, 0x1c, 0xe9, 0xc6, 0xdc, 0x01, 0x0b, 0x8e, 0x13, 0x90, 0x8a, 0xe8,
|
||||||
|
0xb8, 0x35, 0xe3, 0xbe, 0x82, 0x58, 0xea, 0xa8, 0x57, 0x1f, 0x68, 0x56, 0x95, 0xf6, 0xff, 0xe2,
|
||||||
|
0x97, 0xd3, 0xc4, 0xf1, 0xb9, 0x9c, 0x57, 0xec, 0x4f, 0xdc, 0x86, 0x30, 0x05, 0x5f, 0x44, 0xa0,
|
||||||
|
0xa3, 0x1e, 0x1a, 0xbc, 0x18, 0x7d, 0xa0, 0x0f, 0x1a, 0xa4, 0x93, 0x12, 0xb7, 0x36, 0x85, 0xfd,
|
||||||
|
0x33, 0x84, 0xdb, 0xd5, 0x31, 0x19, 0x63, 0x6d, 0xf3, 0xc8, 0xf2, 0x4a, 0x83, 0x16, 0x36, 0x68,
|
||||||
|
0x65, 0x83, 0xfe, 0xa9, 0x88, 0x71, 0x7b, 0x79, 0xd5, 0xad, 0x9d, 0x5e, 0x77, 0x91, 0xb5, 0x2d,
|
||||||
|
0x23, 0xef, 0xb0, 0x16, 0xda, 0x01, 0xc8, 0xc8, 0x76, 0x41, 0x7f, 0xd6, 0x43, 0x03, 0xcd, 0xda,
|
||||||
|
0x1e, 0x90, 0x0e, 0x6e, 0x28, 0x11, 0x71, 0x57, 0xaf, 0xe7, 0x4a, 0x91, 0x90, 0x8f, 0xb8, 0x91,
|
||||||
|
0x3f, 0x52, 0x7f, 0x9e, 0xf7, 0xec, 0xec, 0xf5, 0xfc, 0x11, 0x2e, 0xac, 0x02, 0x19, 0x9d, 0x23,
|
||||||
|
0xdc, 0x9c, 0xe4, 0x8e, 0xc8, 0x14, 0xb7, 0xca, 0x91, 0x90, 0xcf, 0x07, 0x9c, 0xdf, 0x1f, 0x9d,
|
||||||
|
0xf1, 0x7a, 0xaf, 0xc3, 0x64, 0xbd, 0x39, 0xe2, 0x61, 0x6d, 0xb3, 0x12, 0xc2, 0x0e, 0xdc, 0xb9,
|
||||||
|
0xbb, 0x3c, 0xe3, 0xb1, 0xe3, 0xff, 0x82, 0xc6, 0xff, 0x96, 0x2b, 0xb3, 0x76, 0xb9, 0x32, 0x6b,
|
||||||
|
0x27, 0x99, 0x89, 0x96, 0x99, 0x89, 0x2e, 0x32, 0x13, 0xdd, 0x64, 0x26, 0xfa, 0xff, 0xfd, 0x89,
|
||||||
|
0x3f, 0xfd, 0x5b, 0x11, 0x39, 0xcd, 0xdc, 0xd2, 0xd7, 0xdb, 0x00, 0x00, 0x00, 0xff, 0xff, 0x13,
|
||||||
|
0x35, 0xd0, 0x60, 0x32, 0x03, 0x00, 0x00,
|
||||||
|
}
|
30  vendor/github.com/containerd/containerd/api/services/events/v1/events.proto (generated, vendored, new file)
@@ -0,0 +1,30 @@
+syntax = "proto3";
+
+package containerd.services.events.v1;
+
+import "gogoproto/gogo.proto";
+import "google/protobuf/any.proto";
+import "google/protobuf/empty.proto";
+import "google/protobuf/timestamp.proto";
+
+option go_package = "github.com/containerd/containerd/api/services/events/v1;events";
+
+service Events {
+	rpc Publish(PublishRequest) returns (google.protobuf.Empty);
+	rpc Subscribe(SubscribeRequest) returns (stream Envelope);
+}
+
+message SubscribeRequest {
+	repeated string filters = 1;
+}
+
+message PublishRequest {
+	Envelope envelope = 1;
+}
+
+message Envelope {
+	google.protobuf.Timestamp timestamp = 1 [(gogoproto.stdtime) = true, (gogoproto.nullable) = false];
+	string namespace = 2;
+	string topic = 3;
+	google.protobuf.Any event = 4;
+}
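The generated client for this service (NewEventsClient, the Subscribe stream, and the Envelope fields) appears in events.pb.go above. A hedged sketch of consuming the stream over an assumed gRPC connection; the dial target, filter expression, and error handling are illustrative, and containerd clients normally reach the daemon through its unix socket with a custom dialer.

```go
package main

import (
	"context"
	"log"

	events "github.com/containerd/containerd/api/services/events/v1"
	"google.golang.org/grpc"
)

func main() {
	// Dial target is an assumption made for the example.
	conn, err := grpc.Dial("unix:///run/containerd/containerd.sock", grpc.WithInsecure())
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()

	client := events.NewEventsClient(conn)

	// Subscribe returns a server stream of Envelope messages.
	stream, err := client.Subscribe(context.Background(), &events.SubscribeRequest{
		Filters: []string{`topic~="/containers/"`}, // filter syntax is illustrative
	})
	if err != nil {
		log.Fatal(err)
	}

	for {
		env, err := stream.Recv()
		if err != nil {
			log.Fatal(err)
		}
		// Envelope carries a namespace, a topic, and the typed payload as an Any.
		log.Printf("%s %s %s", env.Timestamp, env.Namespace, env.Topic)
	}
}
```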
902  vendor/github.com/containerd/containerd/api/services/events/v1/image.pb.go (generated, vendored, new file)
@@ -0,0 +1,902 @@
// Code generated by protoc-gen-gogo.
|
||||||
|
// source: github.com/containerd/containerd/api/services/events/v1/image.proto
|
||||||
|
// DO NOT EDIT!
|
||||||
|
|
||||||
|
package events
|
||||||
|
|
||||||
|
import proto "github.com/gogo/protobuf/proto"
|
||||||
|
import fmt "fmt"
|
||||||
|
import math "math"
|
||||||
|
|
||||||
|
import strings "strings"
|
||||||
|
import reflect "reflect"
|
||||||
|
import github_com_gogo_protobuf_sortkeys "github.com/gogo/protobuf/sortkeys"
|
||||||
|
|
||||||
|
import io "io"
|
||||||
|
|
||||||
|
// Reference imports to suppress errors if they are not otherwise used.
|
||||||
|
var _ = proto.Marshal
|
||||||
|
var _ = fmt.Errorf
|
||||||
|
var _ = math.Inf
|
||||||
|
|
||||||
|
type ImageCreate struct {
|
||||||
|
Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
|
||||||
|
Labels map[string]string `protobuf:"bytes,2,rep,name=labels" json:"labels,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
|
||||||
|
}
|
||||||
|
|
||||||
|
func (m *ImageCreate) Reset() { *m = ImageCreate{} }
|
||||||
|
func (*ImageCreate) ProtoMessage() {}
|
||||||
|
func (*ImageCreate) Descriptor() ([]byte, []int) { return fileDescriptorImage, []int{0} }
|
||||||
|
|
||||||
|
type ImageUpdate struct {
|
||||||
|
Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
|
||||||
|
Labels map[string]string `protobuf:"bytes,2,rep,name=labels" json:"labels,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
|
||||||
|
}
|
||||||
|
|
||||||
|
func (m *ImageUpdate) Reset() { *m = ImageUpdate{} }
|
||||||
|
func (*ImageUpdate) ProtoMessage() {}
|
||||||
|
func (*ImageUpdate) Descriptor() ([]byte, []int) { return fileDescriptorImage, []int{1} }
|
||||||
|
|
||||||
|
type ImageDelete struct {
|
||||||
|
Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
|
||||||
|
}
|
||||||
|
|
||||||
|
func (m *ImageDelete) Reset() { *m = ImageDelete{} }
|
||||||
|
func (*ImageDelete) ProtoMessage() {}
|
||||||
|
func (*ImageDelete) Descriptor() ([]byte, []int) { return fileDescriptorImage, []int{2} }
|
||||||
|
|
||||||
|
func init() {
|
||||||
|
proto.RegisterType((*ImageCreate)(nil), "containerd.services.images.v1.ImageCreate")
|
||||||
|
proto.RegisterType((*ImageUpdate)(nil), "containerd.services.images.v1.ImageUpdate")
|
||||||
|
proto.RegisterType((*ImageDelete)(nil), "containerd.services.images.v1.ImageDelete")
|
||||||
|
}
|
||||||
|
func (m *ImageCreate) Marshal() (dAtA []byte, err error) {
|
||||||
|
size := m.Size()
|
||||||
|
dAtA = make([]byte, size)
|
||||||
|
n, err := m.MarshalTo(dAtA)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
return dAtA[:n], nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (m *ImageCreate) MarshalTo(dAtA []byte) (int, error) {
|
||||||
|
var i int
|
||||||
|
_ = i
|
||||||
|
var l int
|
||||||
|
_ = l
|
||||||
|
if len(m.Name) > 0 {
|
||||||
|
dAtA[i] = 0xa
|
||||||
|
i++
|
||||||
|
i = encodeVarintImage(dAtA, i, uint64(len(m.Name)))
|
||||||
|
i += copy(dAtA[i:], m.Name)
|
||||||
|
}
|
||||||
|
if len(m.Labels) > 0 {
|
||||||
|
for k, _ := range m.Labels {
|
||||||
|
dAtA[i] = 0x12
|
||||||
|
i++
|
||||||
|
v := m.Labels[k]
|
||||||
|
mapSize := 1 + len(k) + sovImage(uint64(len(k))) + 1 + len(v) + sovImage(uint64(len(v)))
|
||||||
|
i = encodeVarintImage(dAtA, i, uint64(mapSize))
|
||||||
|
dAtA[i] = 0xa
|
||||||
|
i++
|
||||||
|
i = encodeVarintImage(dAtA, i, uint64(len(k)))
|
||||||
|
i += copy(dAtA[i:], k)
|
||||||
|
dAtA[i] = 0x12
|
||||||
|
i++
|
||||||
|
i = encodeVarintImage(dAtA, i, uint64(len(v)))
|
||||||
|
i += copy(dAtA[i:], v)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return i, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (m *ImageUpdate) Marshal() (dAtA []byte, err error) {
|
||||||
|
size := m.Size()
|
||||||
|
dAtA = make([]byte, size)
|
||||||
|
n, err := m.MarshalTo(dAtA)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
return dAtA[:n], nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (m *ImageUpdate) MarshalTo(dAtA []byte) (int, error) {
|
||||||
|
var i int
|
||||||
|
_ = i
|
||||||
|
var l int
|
||||||
|
_ = l
|
||||||
|
if len(m.Name) > 0 {
|
||||||
|
dAtA[i] = 0xa
|
||||||
|
i++
|
||||||
|
i = encodeVarintImage(dAtA, i, uint64(len(m.Name)))
|
||||||
|
i += copy(dAtA[i:], m.Name)
|
||||||
|
}
|
||||||
|
if len(m.Labels) > 0 {
|
||||||
|
for k, _ := range m.Labels {
|
||||||
|
dAtA[i] = 0x12
|
||||||
|
i++
|
||||||
|
v := m.Labels[k]
|
||||||
|
mapSize := 1 + len(k) + sovImage(uint64(len(k))) + 1 + len(v) + sovImage(uint64(len(v)))
|
||||||
|
i = encodeVarintImage(dAtA, i, uint64(mapSize))
|
||||||
|
dAtA[i] = 0xa
|
||||||
|
i++
|
||||||
|
i = encodeVarintImage(dAtA, i, uint64(len(k)))
|
||||||
|
i += copy(dAtA[i:], k)
|
||||||
|
dAtA[i] = 0x12
|
||||||
|
i++
|
||||||
|
i = encodeVarintImage(dAtA, i, uint64(len(v)))
|
||||||
|
i += copy(dAtA[i:], v)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return i, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (m *ImageDelete) Marshal() (dAtA []byte, err error) {
|
||||||
|
size := m.Size()
|
||||||
|
dAtA = make([]byte, size)
|
||||||
|
n, err := m.MarshalTo(dAtA)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
return dAtA[:n], nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (m *ImageDelete) MarshalTo(dAtA []byte) (int, error) {
|
||||||
|
var i int
|
||||||
|
_ = i
|
||||||
|
var l int
|
||||||
|
_ = l
|
||||||
|
if len(m.Name) > 0 {
|
||||||
|
dAtA[i] = 0xa
|
||||||
|
i++
|
||||||
|
i = encodeVarintImage(dAtA, i, uint64(len(m.Name)))
|
||||||
|
i += copy(dAtA[i:], m.Name)
|
||||||
|
}
|
||||||
|
return i, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func encodeFixed64Image(dAtA []byte, offset int, v uint64) int {
|
||||||
|
dAtA[offset] = uint8(v)
|
||||||
|
dAtA[offset+1] = uint8(v >> 8)
|
||||||
|
dAtA[offset+2] = uint8(v >> 16)
|
||||||
|
dAtA[offset+3] = uint8(v >> 24)
|
||||||
|
dAtA[offset+4] = uint8(v >> 32)
|
||||||
|
dAtA[offset+5] = uint8(v >> 40)
|
||||||
|
dAtA[offset+6] = uint8(v >> 48)
|
||||||
|
dAtA[offset+7] = uint8(v >> 56)
|
||||||
|
return offset + 8
|
||||||
|
}
|
||||||
|
func encodeFixed32Image(dAtA []byte, offset int, v uint32) int {
|
||||||
|
dAtA[offset] = uint8(v)
|
||||||
|
dAtA[offset+1] = uint8(v >> 8)
|
||||||
|
dAtA[offset+2] = uint8(v >> 16)
|
||||||
|
dAtA[offset+3] = uint8(v >> 24)
|
||||||
|
return offset + 4
|
||||||
|
}
|
||||||
|
func encodeVarintImage(dAtA []byte, offset int, v uint64) int {
|
||||||
|
for v >= 1<<7 {
|
||||||
|
dAtA[offset] = uint8(v&0x7f | 0x80)
|
||||||
|
v >>= 7
|
||||||
|
offset++
|
||||||
|
}
|
||||||
|
dAtA[offset] = uint8(v)
|
||||||
|
return offset + 1
|
||||||
|
}
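encodeVarintImage above writes protobuf's base-128 varint: seven payload bits per byte, low-order group first, with the continuation bit (0x80) set on every byte but the last. A standalone sketch of the same layout follows; the helper name putUvarint and the demo package are mine, not vendored code.

// Mirrors the loop in encodeVarintImage: emit 7 bits at a time, flagging
// continuation with the high bit; e.g. 300 (binary 1_0010_1100) encodes as 0xac 0x02.
package varintdemo

import "fmt"

func putUvarint(dst []byte, offset int, v uint64) int {
	for v >= 1<<7 {
		dst[offset] = uint8(v&0x7f | 0x80)
		v >>= 7
		offset++
	}
	dst[offset] = uint8(v)
	return offset + 1
}

func Example() {
	buf := make([]byte, 10)
	n := putUvarint(buf, 0, 300)
	fmt.Printf("% x\n", buf[:n]) // prints: ac 02
}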
|
||||||
|
func (m *ImageCreate) Size() (n int) {
|
||||||
|
var l int
|
||||||
|
_ = l
|
||||||
|
l = len(m.Name)
|
||||||
|
if l > 0 {
|
||||||
|
n += 1 + l + sovImage(uint64(l))
|
||||||
|
}
|
||||||
|
if len(m.Labels) > 0 {
|
||||||
|
for k, v := range m.Labels {
|
||||||
|
_ = k
|
||||||
|
_ = v
|
||||||
|
mapEntrySize := 1 + len(k) + sovImage(uint64(len(k))) + 1 + len(v) + sovImage(uint64(len(v)))
|
||||||
|
n += mapEntrySize + 1 + sovImage(uint64(mapEntrySize))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return n
|
||||||
|
}
|
||||||
|
|
||||||
|
func (m *ImageUpdate) Size() (n int) {
|
||||||
|
var l int
|
||||||
|
_ = l
|
||||||
|
l = len(m.Name)
|
||||||
|
if l > 0 {
|
||||||
|
n += 1 + l + sovImage(uint64(l))
|
||||||
|
}
|
||||||
|
if len(m.Labels) > 0 {
|
||||||
|
for k, v := range m.Labels {
|
||||||
|
_ = k
|
||||||
|
_ = v
|
||||||
|
mapEntrySize := 1 + len(k) + sovImage(uint64(len(k))) + 1 + len(v) + sovImage(uint64(len(v)))
|
||||||
|
n += mapEntrySize + 1 + sovImage(uint64(mapEntrySize))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return n
|
||||||
|
}
|
||||||
|
|
||||||
|
func (m *ImageDelete) Size() (n int) {
|
||||||
|
var l int
|
||||||
|
_ = l
|
||||||
|
l = len(m.Name)
|
||||||
|
if l > 0 {
|
||||||
|
n += 1 + l + sovImage(uint64(l))
|
||||||
|
}
|
||||||
|
return n
|
||||||
|
}
|
||||||
|
|
||||||
|
func sovImage(x uint64) (n int) {
|
||||||
|
for {
|
||||||
|
n++
|
||||||
|
x >>= 7
|
||||||
|
if x == 0 {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return n
|
||||||
|
}
|
||||||
|
func sozImage(x uint64) (n int) {
|
||||||
|
return sovImage(uint64((x << 1) ^ uint64((int64(x) >> 63))))
|
||||||
|
}
|
||||||
|
func (this *ImageCreate) String() string {
|
||||||
|
if this == nil {
|
||||||
|
return "nil"
|
||||||
|
}
|
||||||
|
keysForLabels := make([]string, 0, len(this.Labels))
|
||||||
|
for k, _ := range this.Labels {
|
||||||
|
keysForLabels = append(keysForLabels, k)
|
||||||
|
}
|
||||||
|
github_com_gogo_protobuf_sortkeys.Strings(keysForLabels)
|
||||||
|
mapStringForLabels := "map[string]string{"
|
||||||
|
for _, k := range keysForLabels {
|
||||||
|
mapStringForLabels += fmt.Sprintf("%v: %v,", k, this.Labels[k])
|
||||||
|
}
|
||||||
|
mapStringForLabels += "}"
|
||||||
|
s := strings.Join([]string{`&ImageCreate{`,
|
||||||
|
`Name:` + fmt.Sprintf("%v", this.Name) + `,`,
|
||||||
|
`Labels:` + mapStringForLabels + `,`,
|
||||||
|
`}`,
|
||||||
|
}, "")
|
||||||
|
return s
|
||||||
|
}
|
||||||
|
func (this *ImageUpdate) String() string {
|
||||||
|
if this == nil {
|
||||||
|
return "nil"
|
||||||
|
}
|
||||||
|
keysForLabels := make([]string, 0, len(this.Labels))
|
||||||
|
for k, _ := range this.Labels {
|
||||||
|
keysForLabels = append(keysForLabels, k)
|
||||||
|
}
|
||||||
|
github_com_gogo_protobuf_sortkeys.Strings(keysForLabels)
|
||||||
|
mapStringForLabels := "map[string]string{"
|
||||||
|
for _, k := range keysForLabels {
|
||||||
|
mapStringForLabels += fmt.Sprintf("%v: %v,", k, this.Labels[k])
|
||||||
|
}
|
||||||
|
mapStringForLabels += "}"
|
||||||
|
s := strings.Join([]string{`&ImageUpdate{`,
|
||||||
|
`Name:` + fmt.Sprintf("%v", this.Name) + `,`,
|
||||||
|
`Labels:` + mapStringForLabels + `,`,
|
||||||
|
`}`,
|
||||||
|
}, "")
|
||||||
|
return s
|
||||||
|
}
|
||||||
|
func (this *ImageDelete) String() string {
|
||||||
|
if this == nil {
|
||||||
|
return "nil"
|
||||||
|
}
|
||||||
|
s := strings.Join([]string{`&ImageDelete{`,
|
||||||
|
`Name:` + fmt.Sprintf("%v", this.Name) + `,`,
|
||||||
|
`}`,
|
||||||
|
}, "")
|
||||||
|
return s
|
||||||
|
}
|
||||||
|
func valueToStringImage(v interface{}) string {
|
||||||
|
rv := reflect.ValueOf(v)
|
||||||
|
if rv.IsNil() {
|
||||||
|
return "nil"
|
||||||
|
}
|
||||||
|
pv := reflect.Indirect(rv).Interface()
|
||||||
|
return fmt.Sprintf("*%v", pv)
|
||||||
|
}
|
||||||
|
func (m *ImageCreate) Unmarshal(dAtA []byte) error {
|
||||||
|
l := len(dAtA)
|
||||||
|
iNdEx := 0
|
||||||
|
for iNdEx < l {
|
||||||
|
preIndex := iNdEx
|
||||||
|
var wire uint64
|
||||||
|
for shift := uint(0); ; shift += 7 {
|
||||||
|
if shift >= 64 {
|
||||||
|
return ErrIntOverflowImage
|
||||||
|
}
|
||||||
|
if iNdEx >= l {
|
||||||
|
return io.ErrUnexpectedEOF
|
||||||
|
}
|
||||||
|
b := dAtA[iNdEx]
|
||||||
|
iNdEx++
|
||||||
|
wire |= (uint64(b) & 0x7F) << shift
|
||||||
|
if b < 0x80 {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
fieldNum := int32(wire >> 3)
|
||||||
|
wireType := int(wire & 0x7)
|
||||||
|
if wireType == 4 {
|
||||||
|
return fmt.Errorf("proto: ImageCreate: wiretype end group for non-group")
|
||||||
|
}
|
||||||
|
if fieldNum <= 0 {
|
||||||
|
return fmt.Errorf("proto: ImageCreate: illegal tag %d (wire type %d)", fieldNum, wire)
|
||||||
|
}
|
||||||
|
switch fieldNum {
|
||||||
|
case 1:
|
||||||
|
if wireType != 2 {
|
||||||
|
return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType)
|
||||||
|
}
|
||||||
|
var stringLen uint64
|
||||||
|
for shift := uint(0); ; shift += 7 {
|
||||||
|
if shift >= 64 {
|
||||||
|
return ErrIntOverflowImage
|
||||||
|
}
|
||||||
|
if iNdEx >= l {
|
||||||
|
return io.ErrUnexpectedEOF
|
||||||
|
}
|
||||||
|
b := dAtA[iNdEx]
|
||||||
|
iNdEx++
|
||||||
|
stringLen |= (uint64(b) & 0x7F) << shift
|
||||||
|
if b < 0x80 {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
intStringLen := int(stringLen)
|
||||||
|
if intStringLen < 0 {
|
||||||
|
return ErrInvalidLengthImage
|
||||||
|
}
|
||||||
|
postIndex := iNdEx + intStringLen
|
||||||
|
if postIndex > l {
|
||||||
|
return io.ErrUnexpectedEOF
|
||||||
|
}
|
||||||
|
m.Name = string(dAtA[iNdEx:postIndex])
|
||||||
|
iNdEx = postIndex
|
||||||
|
case 2:
|
||||||
|
if wireType != 2 {
|
||||||
|
return fmt.Errorf("proto: wrong wireType = %d for field Labels", wireType)
|
||||||
|
}
|
||||||
|
var msglen int
|
||||||
|
for shift := uint(0); ; shift += 7 {
|
||||||
|
if shift >= 64 {
|
||||||
|
return ErrIntOverflowImage
|
||||||
|
}
|
||||||
|
if iNdEx >= l {
|
||||||
|
return io.ErrUnexpectedEOF
|
||||||
|
}
|
||||||
|
b := dAtA[iNdEx]
|
||||||
|
iNdEx++
|
||||||
|
msglen |= (int(b) & 0x7F) << shift
|
||||||
|
if b < 0x80 {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if msglen < 0 {
|
||||||
|
return ErrInvalidLengthImage
|
||||||
|
}
|
||||||
|
postIndex := iNdEx + msglen
|
||||||
|
if postIndex > l {
|
||||||
|
return io.ErrUnexpectedEOF
|
||||||
|
}
|
||||||
|
var keykey uint64
|
||||||
|
for shift := uint(0); ; shift += 7 {
|
||||||
|
if shift >= 64 {
|
||||||
|
return ErrIntOverflowImage
|
||||||
|
}
|
||||||
|
if iNdEx >= l {
|
||||||
|
return io.ErrUnexpectedEOF
|
||||||
|
}
|
||||||
|
b := dAtA[iNdEx]
|
||||||
|
iNdEx++
|
||||||
|
keykey |= (uint64(b) & 0x7F) << shift
|
||||||
|
if b < 0x80 {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
var stringLenmapkey uint64
|
||||||
|
for shift := uint(0); ; shift += 7 {
|
||||||
|
if shift >= 64 {
|
||||||
|
return ErrIntOverflowImage
|
||||||
|
}
|
||||||
|
if iNdEx >= l {
|
||||||
|
return io.ErrUnexpectedEOF
|
||||||
|
}
|
||||||
|
b := dAtA[iNdEx]
|
||||||
|
iNdEx++
|
||||||
|
stringLenmapkey |= (uint64(b) & 0x7F) << shift
|
||||||
|
if b < 0x80 {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
intStringLenmapkey := int(stringLenmapkey)
|
||||||
|
if intStringLenmapkey < 0 {
|
||||||
|
return ErrInvalidLengthImage
|
||||||
|
}
|
||||||
|
postStringIndexmapkey := iNdEx + intStringLenmapkey
|
||||||
|
if postStringIndexmapkey > l {
|
||||||
|
return io.ErrUnexpectedEOF
|
||||||
|
}
|
||||||
|
mapkey := string(dAtA[iNdEx:postStringIndexmapkey])
|
||||||
|
iNdEx = postStringIndexmapkey
|
||||||
|
if m.Labels == nil {
|
||||||
|
m.Labels = make(map[string]string)
|
||||||
|
}
|
||||||
|
if iNdEx < postIndex {
|
||||||
|
var valuekey uint64
|
||||||
|
for shift := uint(0); ; shift += 7 {
|
||||||
|
if shift >= 64 {
|
||||||
|
return ErrIntOverflowImage
|
||||||
|
}
|
||||||
|
if iNdEx >= l {
|
||||||
|
return io.ErrUnexpectedEOF
|
||||||
|
}
|
||||||
|
b := dAtA[iNdEx]
|
||||||
|
iNdEx++
|
||||||
|
valuekey |= (uint64(b) & 0x7F) << shift
|
||||||
|
if b < 0x80 {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
var stringLenmapvalue uint64
|
||||||
|
for shift := uint(0); ; shift += 7 {
|
||||||
|
if shift >= 64 {
|
||||||
|
return ErrIntOverflowImage
|
||||||
|
}
|
||||||
|
if iNdEx >= l {
|
||||||
|
return io.ErrUnexpectedEOF
|
||||||
|
}
|
||||||
|
b := dAtA[iNdEx]
|
||||||
|
iNdEx++
|
||||||
|
stringLenmapvalue |= (uint64(b) & 0x7F) << shift
|
||||||
|
if b < 0x80 {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
intStringLenmapvalue := int(stringLenmapvalue)
|
||||||
|
if intStringLenmapvalue < 0 {
|
||||||
|
return ErrInvalidLengthImage
|
||||||
|
}
|
||||||
|
postStringIndexmapvalue := iNdEx + intStringLenmapvalue
|
||||||
|
if postStringIndexmapvalue > l {
|
||||||
|
return io.ErrUnexpectedEOF
|
||||||
|
}
|
||||||
|
mapvalue := string(dAtA[iNdEx:postStringIndexmapvalue])
|
||||||
|
iNdEx = postStringIndexmapvalue
|
||||||
|
m.Labels[mapkey] = mapvalue
|
||||||
|
} else {
|
||||||
|
var mapvalue string
|
||||||
|
m.Labels[mapkey] = mapvalue
|
||||||
|
}
|
||||||
|
iNdEx = postIndex
|
||||||
|
default:
|
||||||
|
iNdEx = preIndex
|
||||||
|
skippy, err := skipImage(dAtA[iNdEx:])
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
if skippy < 0 {
|
||||||
|
return ErrInvalidLengthImage
|
||||||
|
}
|
||||||
|
if (iNdEx + skippy) > l {
|
||||||
|
return io.ErrUnexpectedEOF
|
||||||
|
}
|
||||||
|
iNdEx += skippy
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if iNdEx > l {
|
||||||
|
return io.ErrUnexpectedEOF
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
func (m *ImageUpdate) Unmarshal(dAtA []byte) error {
|
||||||
|
l := len(dAtA)
|
||||||
|
iNdEx := 0
|
||||||
|
for iNdEx < l {
|
||||||
|
preIndex := iNdEx
|
||||||
|
var wire uint64
|
||||||
|
for shift := uint(0); ; shift += 7 {
|
||||||
|
if shift >= 64 {
|
||||||
|
return ErrIntOverflowImage
|
||||||
|
}
|
||||||
|
if iNdEx >= l {
|
||||||
|
return io.ErrUnexpectedEOF
|
||||||
|
}
|
||||||
|
b := dAtA[iNdEx]
|
||||||
|
iNdEx++
|
||||||
|
wire |= (uint64(b) & 0x7F) << shift
|
||||||
|
if b < 0x80 {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
fieldNum := int32(wire >> 3)
|
||||||
|
wireType := int(wire & 0x7)
|
||||||
|
if wireType == 4 {
|
||||||
|
return fmt.Errorf("proto: ImageUpdate: wiretype end group for non-group")
|
||||||
|
}
|
||||||
|
if fieldNum <= 0 {
|
||||||
|
return fmt.Errorf("proto: ImageUpdate: illegal tag %d (wire type %d)", fieldNum, wire)
|
||||||
|
}
|
||||||
|
switch fieldNum {
|
||||||
|
case 1:
|
||||||
|
if wireType != 2 {
|
||||||
|
return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType)
|
||||||
|
}
|
||||||
|
var stringLen uint64
|
||||||
|
for shift := uint(0); ; shift += 7 {
|
||||||
|
if shift >= 64 {
|
||||||
|
return ErrIntOverflowImage
|
||||||
|
}
|
||||||
|
if iNdEx >= l {
|
||||||
|
return io.ErrUnexpectedEOF
|
||||||
|
}
|
||||||
|
b := dAtA[iNdEx]
|
||||||
|
iNdEx++
|
||||||
|
stringLen |= (uint64(b) & 0x7F) << shift
|
||||||
|
if b < 0x80 {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
intStringLen := int(stringLen)
|
||||||
|
if intStringLen < 0 {
|
||||||
|
return ErrInvalidLengthImage
|
||||||
|
}
|
||||||
|
postIndex := iNdEx + intStringLen
|
||||||
|
if postIndex > l {
|
||||||
|
return io.ErrUnexpectedEOF
|
||||||
|
}
|
||||||
|
m.Name = string(dAtA[iNdEx:postIndex])
|
||||||
|
iNdEx = postIndex
|
||||||
|
case 2:
|
||||||
|
if wireType != 2 {
|
||||||
|
return fmt.Errorf("proto: wrong wireType = %d for field Labels", wireType)
|
||||||
|
}
|
||||||
|
var msglen int
|
||||||
|
for shift := uint(0); ; shift += 7 {
|
||||||
|
if shift >= 64 {
|
||||||
|
return ErrIntOverflowImage
|
||||||
|
}
|
||||||
|
if iNdEx >= l {
|
||||||
|
return io.ErrUnexpectedEOF
|
||||||
|
}
|
||||||
|
b := dAtA[iNdEx]
|
||||||
|
iNdEx++
|
||||||
|
msglen |= (int(b) & 0x7F) << shift
|
||||||
|
if b < 0x80 {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if msglen < 0 {
|
||||||
|
return ErrInvalidLengthImage
|
||||||
|
}
|
||||||
|
postIndex := iNdEx + msglen
|
||||||
|
if postIndex > l {
|
||||||
|
return io.ErrUnexpectedEOF
|
||||||
|
}
|
||||||
|
var keykey uint64
|
||||||
|
for shift := uint(0); ; shift += 7 {
|
||||||
|
if shift >= 64 {
|
||||||
|
return ErrIntOverflowImage
|
||||||
|
}
|
||||||
|
if iNdEx >= l {
|
||||||
|
return io.ErrUnexpectedEOF
|
||||||
|
}
|
||||||
|
b := dAtA[iNdEx]
|
||||||
|
iNdEx++
|
||||||
|
keykey |= (uint64(b) & 0x7F) << shift
|
||||||
|
if b < 0x80 {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
var stringLenmapkey uint64
|
||||||
|
for shift := uint(0); ; shift += 7 {
|
||||||
|
if shift >= 64 {
|
||||||
|
return ErrIntOverflowImage
|
||||||
|
}
|
||||||
|
if iNdEx >= l {
|
||||||
|
return io.ErrUnexpectedEOF
|
||||||
|
}
|
||||||
|
b := dAtA[iNdEx]
|
||||||
|
iNdEx++
|
||||||
|
stringLenmapkey |= (uint64(b) & 0x7F) << shift
|
||||||
|
if b < 0x80 {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
intStringLenmapkey := int(stringLenmapkey)
|
||||||
|
if intStringLenmapkey < 0 {
|
||||||
|
return ErrInvalidLengthImage
|
||||||
|
}
|
||||||
|
postStringIndexmapkey := iNdEx + intStringLenmapkey
|
||||||
|
if postStringIndexmapkey > l {
|
||||||
|
return io.ErrUnexpectedEOF
|
||||||
|
}
|
||||||
|
mapkey := string(dAtA[iNdEx:postStringIndexmapkey])
|
||||||
|
iNdEx = postStringIndexmapkey
|
||||||
|
if m.Labels == nil {
|
||||||
|
m.Labels = make(map[string]string)
|
||||||
|
}
|
||||||
|
if iNdEx < postIndex {
|
||||||
|
var valuekey uint64
|
||||||
|
for shift := uint(0); ; shift += 7 {
|
||||||
|
if shift >= 64 {
|
||||||
|
return ErrIntOverflowImage
|
||||||
|
}
|
||||||
|
if iNdEx >= l {
|
||||||
|
return io.ErrUnexpectedEOF
|
||||||
|
}
|
||||||
|
b := dAtA[iNdEx]
|
||||||
|
iNdEx++
|
||||||
|
valuekey |= (uint64(b) & 0x7F) << shift
|
||||||
|
if b < 0x80 {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
var stringLenmapvalue uint64
|
||||||
|
for shift := uint(0); ; shift += 7 {
|
||||||
|
if shift >= 64 {
|
||||||
|
return ErrIntOverflowImage
|
||||||
|
}
|
||||||
|
if iNdEx >= l {
|
||||||
|
return io.ErrUnexpectedEOF
|
||||||
|
}
|
||||||
|
b := dAtA[iNdEx]
|
||||||
|
iNdEx++
|
||||||
|
stringLenmapvalue |= (uint64(b) & 0x7F) << shift
|
||||||
|
if b < 0x80 {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
intStringLenmapvalue := int(stringLenmapvalue)
|
||||||
|
if intStringLenmapvalue < 0 {
|
||||||
|
return ErrInvalidLengthImage
|
||||||
|
}
|
||||||
|
postStringIndexmapvalue := iNdEx + intStringLenmapvalue
|
||||||
|
if postStringIndexmapvalue > l {
|
||||||
|
return io.ErrUnexpectedEOF
|
||||||
|
}
|
||||||
|
mapvalue := string(dAtA[iNdEx:postStringIndexmapvalue])
|
||||||
|
iNdEx = postStringIndexmapvalue
|
||||||
|
m.Labels[mapkey] = mapvalue
|
||||||
|
} else {
|
||||||
|
var mapvalue string
|
||||||
|
m.Labels[mapkey] = mapvalue
|
||||||
|
}
|
||||||
|
iNdEx = postIndex
|
||||||
|
default:
|
||||||
|
iNdEx = preIndex
|
||||||
|
skippy, err := skipImage(dAtA[iNdEx:])
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
if skippy < 0 {
|
||||||
|
return ErrInvalidLengthImage
|
||||||
|
}
|
||||||
|
if (iNdEx + skippy) > l {
|
||||||
|
return io.ErrUnexpectedEOF
|
||||||
|
}
|
||||||
|
iNdEx += skippy
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if iNdEx > l {
|
||||||
|
return io.ErrUnexpectedEOF
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
func (m *ImageDelete) Unmarshal(dAtA []byte) error {
|
||||||
|
l := len(dAtA)
|
||||||
|
iNdEx := 0
|
||||||
|
for iNdEx < l {
|
||||||
|
preIndex := iNdEx
|
||||||
|
var wire uint64
|
||||||
|
for shift := uint(0); ; shift += 7 {
|
||||||
|
if shift >= 64 {
|
||||||
|
return ErrIntOverflowImage
|
||||||
|
}
|
||||||
|
if iNdEx >= l {
|
||||||
|
return io.ErrUnexpectedEOF
|
||||||
|
}
|
||||||
|
b := dAtA[iNdEx]
|
||||||
|
iNdEx++
|
||||||
|
wire |= (uint64(b) & 0x7F) << shift
|
||||||
|
if b < 0x80 {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
fieldNum := int32(wire >> 3)
|
||||||
|
wireType := int(wire & 0x7)
|
||||||
|
if wireType == 4 {
|
||||||
|
return fmt.Errorf("proto: ImageDelete: wiretype end group for non-group")
|
||||||
|
}
|
||||||
|
if fieldNum <= 0 {
|
||||||
|
return fmt.Errorf("proto: ImageDelete: illegal tag %d (wire type %d)", fieldNum, wire)
|
||||||
|
}
|
||||||
|
switch fieldNum {
|
||||||
|
case 1:
|
||||||
|
if wireType != 2 {
|
||||||
|
return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType)
|
||||||
|
}
|
||||||
|
var stringLen uint64
|
||||||
|
for shift := uint(0); ; shift += 7 {
|
||||||
|
if shift >= 64 {
|
||||||
|
return ErrIntOverflowImage
|
||||||
|
}
|
||||||
|
if iNdEx >= l {
|
||||||
|
return io.ErrUnexpectedEOF
|
||||||
|
}
|
||||||
|
b := dAtA[iNdEx]
|
||||||
|
iNdEx++
|
||||||
|
stringLen |= (uint64(b) & 0x7F) << shift
|
||||||
|
if b < 0x80 {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
intStringLen := int(stringLen)
|
||||||
|
if intStringLen < 0 {
|
||||||
|
return ErrInvalidLengthImage
|
||||||
|
}
|
||||||
|
postIndex := iNdEx + intStringLen
|
||||||
|
if postIndex > l {
|
||||||
|
return io.ErrUnexpectedEOF
|
||||||
|
}
|
||||||
|
m.Name = string(dAtA[iNdEx:postIndex])
|
||||||
|
iNdEx = postIndex
|
||||||
|
default:
|
||||||
|
iNdEx = preIndex
|
||||||
|
skippy, err := skipImage(dAtA[iNdEx:])
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
if skippy < 0 {
|
||||||
|
return ErrInvalidLengthImage
|
||||||
|
}
|
||||||
|
if (iNdEx + skippy) > l {
|
||||||
|
return io.ErrUnexpectedEOF
|
||||||
|
}
|
||||||
|
iNdEx += skippy
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if iNdEx > l {
|
||||||
|
return io.ErrUnexpectedEOF
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
func skipImage(dAtA []byte) (n int, err error) {
|
||||||
|
l := len(dAtA)
|
||||||
|
iNdEx := 0
|
||||||
|
for iNdEx < l {
|
||||||
|
var wire uint64
|
||||||
|
for shift := uint(0); ; shift += 7 {
|
||||||
|
if shift >= 64 {
|
||||||
|
return 0, ErrIntOverflowImage
|
||||||
|
}
|
||||||
|
if iNdEx >= l {
|
||||||
|
return 0, io.ErrUnexpectedEOF
|
||||||
|
}
|
||||||
|
b := dAtA[iNdEx]
|
||||||
|
iNdEx++
|
||||||
|
wire |= (uint64(b) & 0x7F) << shift
|
||||||
|
if b < 0x80 {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
wireType := int(wire & 0x7)
|
||||||
|
switch wireType {
|
||||||
|
case 0:
|
||||||
|
for shift := uint(0); ; shift += 7 {
|
||||||
|
if shift >= 64 {
|
||||||
|
return 0, ErrIntOverflowImage
|
||||||
|
}
|
||||||
|
if iNdEx >= l {
|
||||||
|
return 0, io.ErrUnexpectedEOF
|
||||||
|
}
|
||||||
|
iNdEx++
|
||||||
|
if dAtA[iNdEx-1] < 0x80 {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return iNdEx, nil
|
||||||
|
case 1:
|
||||||
|
iNdEx += 8
|
||||||
|
return iNdEx, nil
|
||||||
|
case 2:
|
||||||
|
var length int
|
||||||
|
for shift := uint(0); ; shift += 7 {
|
||||||
|
if shift >= 64 {
|
||||||
|
return 0, ErrIntOverflowImage
|
||||||
|
}
|
||||||
|
if iNdEx >= l {
|
||||||
|
return 0, io.ErrUnexpectedEOF
|
||||||
|
}
|
||||||
|
b := dAtA[iNdEx]
|
||||||
|
iNdEx++
|
||||||
|
length |= (int(b) & 0x7F) << shift
|
||||||
|
if b < 0x80 {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
iNdEx += length
|
||||||
|
if length < 0 {
|
||||||
|
return 0, ErrInvalidLengthImage
|
||||||
|
}
|
||||||
|
return iNdEx, nil
|
||||||
|
case 3:
|
||||||
|
for {
|
||||||
|
var innerWire uint64
|
||||||
|
var start int = iNdEx
|
||||||
|
for shift := uint(0); ; shift += 7 {
|
||||||
|
if shift >= 64 {
|
||||||
|
return 0, ErrIntOverflowImage
|
||||||
|
}
|
||||||
|
if iNdEx >= l {
|
||||||
|
return 0, io.ErrUnexpectedEOF
|
||||||
|
}
|
||||||
|
b := dAtA[iNdEx]
|
||||||
|
iNdEx++
|
||||||
|
innerWire |= (uint64(b) & 0x7F) << shift
|
||||||
|
if b < 0x80 {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
innerWireType := int(innerWire & 0x7)
|
||||||
|
if innerWireType == 4 {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
next, err := skipImage(dAtA[start:])
|
||||||
|
if err != nil {
|
||||||
|
return 0, err
|
||||||
|
}
|
||||||
|
iNdEx = start + next
|
||||||
|
}
|
||||||
|
return iNdEx, nil
|
||||||
|
case 4:
|
||||||
|
return iNdEx, nil
|
||||||
|
case 5:
|
||||||
|
iNdEx += 4
|
||||||
|
return iNdEx, nil
|
||||||
|
default:
|
||||||
|
return 0, fmt.Errorf("proto: illegal wireType %d", wireType)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
panic("unreachable")
|
||||||
|
}
|
||||||
|
|
||||||
|
var (
|
||||||
|
ErrInvalidLengthImage = fmt.Errorf("proto: negative length found during unmarshaling")
|
||||||
|
ErrIntOverflowImage = fmt.Errorf("proto: integer overflow")
|
||||||
|
)
|
||||||
|
|
||||||
|
func init() {
|
||||||
|
proto.RegisterFile("github.com/containerd/containerd/api/services/events/v1/image.proto", fileDescriptorImage)
|
||||||
|
}
|
||||||
|
|
||||||
|
var fileDescriptorImage = []byte{
|
||||||
|
// 263 bytes of a gzipped FileDescriptorProto
|
||||||
|
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x72, 0x4e, 0xcf, 0x2c, 0xc9,
|
||||||
|
0x28, 0x4d, 0xd2, 0x4b, 0xce, 0xcf, 0xd5, 0x4f, 0xce, 0xcf, 0x2b, 0x49, 0xcc, 0xcc, 0x4b, 0x2d,
|
||||||
|
0x4a, 0x41, 0x66, 0x26, 0x16, 0x64, 0xea, 0x17, 0xa7, 0x16, 0x95, 0x65, 0x26, 0xa7, 0x16, 0xeb,
|
||||||
|
0xa7, 0x96, 0xa5, 0xe6, 0x95, 0x14, 0xeb, 0x97, 0x19, 0xea, 0x67, 0xe6, 0x26, 0xa6, 0xa7, 0xea,
|
||||||
|
0x15, 0x14, 0xe5, 0x97, 0xe4, 0x0b, 0xc9, 0x22, 0x94, 0xeb, 0xc1, 0x94, 0xea, 0x81, 0x15, 0x14,
|
||||||
|
0xeb, 0x95, 0x19, 0x2a, 0xad, 0x61, 0xe4, 0xe2, 0xf6, 0x04, 0xf1, 0x9c, 0x8b, 0x52, 0x13, 0x4b,
|
||||||
|
0x52, 0x85, 0x84, 0xb8, 0x58, 0xf2, 0x12, 0x73, 0x53, 0x25, 0x18, 0x15, 0x18, 0x35, 0x38, 0x83,
|
||||||
|
0xc0, 0x6c, 0x21, 0x3f, 0x2e, 0xb6, 0x9c, 0xc4, 0xa4, 0xd4, 0x9c, 0x62, 0x09, 0x26, 0x05, 0x66,
|
||||||
|
0x0d, 0x6e, 0x23, 0x33, 0x3d, 0xbc, 0x66, 0xea, 0x21, 0x99, 0xa7, 0xe7, 0x03, 0xd6, 0xe8, 0x9a,
|
||||||
|
0x57, 0x52, 0x54, 0x19, 0x04, 0x35, 0x45, 0xca, 0x92, 0x8b, 0x1b, 0x49, 0x58, 0x48, 0x80, 0x8b,
|
||||||
|
0x39, 0x3b, 0xb5, 0x12, 0x6a, 0x23, 0x88, 0x29, 0x24, 0xc2, 0xc5, 0x5a, 0x96, 0x98, 0x53, 0x9a,
|
||||||
|
0x2a, 0xc1, 0x04, 0x16, 0x83, 0x70, 0xac, 0x98, 0x2c, 0x18, 0x11, 0xce, 0x0d, 0x2d, 0x48, 0xa1,
|
||||||
|
0xaa, 0x73, 0x21, 0xe6, 0x51, 0xdb, 0xb9, 0x8a, 0x50, 0xd7, 0xba, 0xa4, 0xe6, 0xa4, 0x62, 0x77,
|
||||||
|
0xad, 0x53, 0xc4, 0x89, 0x87, 0x72, 0x0c, 0x37, 0x1e, 0xca, 0x31, 0x34, 0x3c, 0x92, 0x63, 0x3c,
|
||||||
|
0xf1, 0x48, 0x8e, 0xf1, 0xc2, 0x23, 0x39, 0xc6, 0x07, 0x8f, 0xe4, 0x18, 0xa3, 0xec, 0xc8, 0x8c,
|
||||||
|
0x7e, 0x6b, 0x08, 0x2b, 0x89, 0x0d, 0x9c, 0x00, 0x8c, 0x01, 0x01, 0x00, 0x00, 0xff, 0xff, 0x13,
|
||||||
|
0x7c, 0x2c, 0x4a, 0x47, 0x02, 0x00, 0x00,
|
||||||
|
}
|
19
vendor/github.com/containerd/containerd/api/services/events/v1/image.proto
generated
vendored
Normal file
@ -0,0 +1,19 @@
syntax = "proto3";

package containerd.services.images.v1;

option go_package = "github.com/containerd/containerd/api/services/events/v1;events";

message ImageCreate {
	string name = 1;
	map<string, string> labels = 2;
}

message ImageUpdate {
	string name = 1;
	map<string, string> labels = 2;
}

message ImageDelete {
	string name = 1;
}
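The image event messages above map onto the ImageCreate/ImageUpdate/ImageDelete Go types and their generated Marshal/Unmarshal methods vendored in image.pb.go earlier in this change. A minimal round-trip sketch using only what that file defines; the image name and label are made-up example values, and the demo package is not part of the vendored code.

// Sketch: encode and decode an ImageCreate event with the generated
// gogo/protobuf methods vendored above (Marshal, Unmarshal).
package imagedemo

import (
	"fmt"

	events "github.com/containerd/containerd/api/services/events/v1"
)

func RoundTrip() error {
	in := &events.ImageCreate{
		Name:   "docker.io/library/busybox:latest", // example value only
		Labels: map[string]string{"source": "example"},
	}
	data, err := in.Marshal() // length-prefixed fields, as written by MarshalTo
	if err != nil {
		return err
	}
	var out events.ImageCreate
	if err := out.Unmarshal(data); err != nil {
		return err
	}
	fmt.Println(out.Name, out.Labels["source"])
	return nil
}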
904
vendor/github.com/containerd/containerd/api/services/events/v1/namespace.pb.go
generated
vendored
Normal file
@ -0,0 +1,904 @@
// Code generated by protoc-gen-gogo.
// source: github.com/containerd/containerd/api/services/events/v1/namespace.proto
// DO NOT EDIT!

package events

import proto "github.com/gogo/protobuf/proto"
import fmt "fmt"
import math "math"
import _ "github.com/gogo/protobuf/gogoproto"

import strings "strings"
import reflect "reflect"
import github_com_gogo_protobuf_sortkeys "github.com/gogo/protobuf/sortkeys"

import io "io"

// Reference imports to suppress errors if they are not otherwise used.
var _ = proto.Marshal
var _ = fmt.Errorf
var _ = math.Inf

type NamespaceCreate struct {
	Name   string            `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
	Labels map[string]string `protobuf:"bytes,2,rep,name=labels" json:"labels,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
}

func (m *NamespaceCreate) Reset() { *m = NamespaceCreate{} }
func (*NamespaceCreate) ProtoMessage() {}
func (*NamespaceCreate) Descriptor() ([]byte, []int) { return fileDescriptorNamespace, []int{0} }

type NamespaceUpdate struct {
	Name   string            `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
	Labels map[string]string `protobuf:"bytes,2,rep,name=labels" json:"labels,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
}

func (m *NamespaceUpdate) Reset() { *m = NamespaceUpdate{} }
func (*NamespaceUpdate) ProtoMessage() {}
func (*NamespaceUpdate) Descriptor() ([]byte, []int) { return fileDescriptorNamespace, []int{1} }

type NamespaceDelete struct {
	Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
}

func (m *NamespaceDelete) Reset() { *m = NamespaceDelete{} }
func (*NamespaceDelete) ProtoMessage() {}
func (*NamespaceDelete) Descriptor() ([]byte, []int) { return fileDescriptorNamespace, []int{2} }

func init() {
	proto.RegisterType((*NamespaceCreate)(nil), "containerd.services.events.v1.NamespaceCreate")
	proto.RegisterType((*NamespaceUpdate)(nil), "containerd.services.events.v1.NamespaceUpdate")
	proto.RegisterType((*NamespaceDelete)(nil), "containerd.services.events.v1.NamespaceDelete")
}
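The namespace event types mirror the image ones, including the label-map encoding handled by the MarshalTo and Size methods that follow. A small hypothetical usage sketch only; the namespace name, label, and demo package are invented for illustration.

// Sketch: Size reports the encoded length Marshal would produce, and the
// generated String method gives a debug rendering of the message.
package namespacedemo

import (
	"fmt"

	events "github.com/containerd/containerd/api/services/events/v1"
)

func Describe() {
	ev := &events.NamespaceCreate{
		Name:   "k8s.io", // example value only
		Labels: map[string]string{"team": "node"},
	}
	fmt.Println(ev.Size())   // encoded size in bytes
	fmt.Println(ev.String()) // debug form: &NamespaceCreate{Name:..., Labels:...}
}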
|
||||||
|
func (m *NamespaceCreate) Marshal() (dAtA []byte, err error) {
|
||||||
|
size := m.Size()
|
||||||
|
dAtA = make([]byte, size)
|
||||||
|
n, err := m.MarshalTo(dAtA)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
return dAtA[:n], nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (m *NamespaceCreate) MarshalTo(dAtA []byte) (int, error) {
|
||||||
|
var i int
|
||||||
|
_ = i
|
||||||
|
var l int
|
||||||
|
_ = l
|
||||||
|
if len(m.Name) > 0 {
|
||||||
|
dAtA[i] = 0xa
|
||||||
|
i++
|
||||||
|
i = encodeVarintNamespace(dAtA, i, uint64(len(m.Name)))
|
||||||
|
i += copy(dAtA[i:], m.Name)
|
||||||
|
}
|
||||||
|
if len(m.Labels) > 0 {
|
||||||
|
for k, _ := range m.Labels {
|
||||||
|
dAtA[i] = 0x12
|
||||||
|
i++
|
||||||
|
v := m.Labels[k]
|
||||||
|
mapSize := 1 + len(k) + sovNamespace(uint64(len(k))) + 1 + len(v) + sovNamespace(uint64(len(v)))
|
||||||
|
i = encodeVarintNamespace(dAtA, i, uint64(mapSize))
|
||||||
|
dAtA[i] = 0xa
|
||||||
|
i++
|
||||||
|
i = encodeVarintNamespace(dAtA, i, uint64(len(k)))
|
||||||
|
i += copy(dAtA[i:], k)
|
||||||
|
dAtA[i] = 0x12
|
||||||
|
i++
|
||||||
|
i = encodeVarintNamespace(dAtA, i, uint64(len(v)))
|
||||||
|
i += copy(dAtA[i:], v)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return i, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (m *NamespaceUpdate) Marshal() (dAtA []byte, err error) {
|
||||||
|
size := m.Size()
|
||||||
|
dAtA = make([]byte, size)
|
||||||
|
n, err := m.MarshalTo(dAtA)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
return dAtA[:n], nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (m *NamespaceUpdate) MarshalTo(dAtA []byte) (int, error) {
|
||||||
|
var i int
|
||||||
|
_ = i
|
||||||
|
var l int
|
||||||
|
_ = l
|
||||||
|
if len(m.Name) > 0 {
|
||||||
|
dAtA[i] = 0xa
|
||||||
|
i++
|
||||||
|
i = encodeVarintNamespace(dAtA, i, uint64(len(m.Name)))
|
||||||
|
i += copy(dAtA[i:], m.Name)
|
||||||
|
}
|
||||||
|
if len(m.Labels) > 0 {
|
||||||
|
for k, _ := range m.Labels {
|
||||||
|
dAtA[i] = 0x12
|
||||||
|
i++
|
||||||
|
v := m.Labels[k]
|
||||||
|
mapSize := 1 + len(k) + sovNamespace(uint64(len(k))) + 1 + len(v) + sovNamespace(uint64(len(v)))
|
||||||
|
i = encodeVarintNamespace(dAtA, i, uint64(mapSize))
|
||||||
|
dAtA[i] = 0xa
|
||||||
|
i++
|
||||||
|
i = encodeVarintNamespace(dAtA, i, uint64(len(k)))
|
||||||
|
i += copy(dAtA[i:], k)
|
||||||
|
dAtA[i] = 0x12
|
||||||
|
i++
|
||||||
|
i = encodeVarintNamespace(dAtA, i, uint64(len(v)))
|
||||||
|
i += copy(dAtA[i:], v)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return i, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (m *NamespaceDelete) Marshal() (dAtA []byte, err error) {
|
||||||
|
size := m.Size()
|
||||||
|
dAtA = make([]byte, size)
|
||||||
|
n, err := m.MarshalTo(dAtA)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
return dAtA[:n], nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (m *NamespaceDelete) MarshalTo(dAtA []byte) (int, error) {
|
||||||
|
var i int
|
||||||
|
_ = i
|
||||||
|
var l int
|
||||||
|
_ = l
|
||||||
|
if len(m.Name) > 0 {
|
||||||
|
dAtA[i] = 0xa
|
||||||
|
i++
|
||||||
|
i = encodeVarintNamespace(dAtA, i, uint64(len(m.Name)))
|
||||||
|
i += copy(dAtA[i:], m.Name)
|
||||||
|
}
|
||||||
|
return i, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func encodeFixed64Namespace(dAtA []byte, offset int, v uint64) int {
|
||||||
|
dAtA[offset] = uint8(v)
|
||||||
|
dAtA[offset+1] = uint8(v >> 8)
|
||||||
|
dAtA[offset+2] = uint8(v >> 16)
|
||||||
|
dAtA[offset+3] = uint8(v >> 24)
|
||||||
|
dAtA[offset+4] = uint8(v >> 32)
|
||||||
|
dAtA[offset+5] = uint8(v >> 40)
|
||||||
|
dAtA[offset+6] = uint8(v >> 48)
|
||||||
|
dAtA[offset+7] = uint8(v >> 56)
|
||||||
|
return offset + 8
|
||||||
|
}
|
||||||
|
func encodeFixed32Namespace(dAtA []byte, offset int, v uint32) int {
|
||||||
|
dAtA[offset] = uint8(v)
|
||||||
|
dAtA[offset+1] = uint8(v >> 8)
|
||||||
|
dAtA[offset+2] = uint8(v >> 16)
|
||||||
|
dAtA[offset+3] = uint8(v >> 24)
|
||||||
|
return offset + 4
|
||||||
|
}
|
||||||
|
func encodeVarintNamespace(dAtA []byte, offset int, v uint64) int {
|
||||||
|
for v >= 1<<7 {
|
||||||
|
dAtA[offset] = uint8(v&0x7f | 0x80)
|
||||||
|
v >>= 7
|
||||||
|
offset++
|
||||||
|
}
|
||||||
|
dAtA[offset] = uint8(v)
|
||||||
|
return offset + 1
|
||||||
|
}
|
||||||
|
func (m *NamespaceCreate) Size() (n int) {
|
||||||
|
var l int
|
||||||
|
_ = l
|
||||||
|
l = len(m.Name)
|
||||||
|
if l > 0 {
|
||||||
|
n += 1 + l + sovNamespace(uint64(l))
|
||||||
|
}
|
||||||
|
if len(m.Labels) > 0 {
|
||||||
|
for k, v := range m.Labels {
|
||||||
|
_ = k
|
||||||
|
_ = v
|
||||||
|
mapEntrySize := 1 + len(k) + sovNamespace(uint64(len(k))) + 1 + len(v) + sovNamespace(uint64(len(v)))
|
||||||
|
n += mapEntrySize + 1 + sovNamespace(uint64(mapEntrySize))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return n
|
||||||
|
}
|
||||||
|
|
||||||
|
func (m *NamespaceUpdate) Size() (n int) {
|
||||||
|
var l int
|
||||||
|
_ = l
|
||||||
|
l = len(m.Name)
|
||||||
|
if l > 0 {
|
||||||
|
n += 1 + l + sovNamespace(uint64(l))
|
||||||
|
}
|
||||||
|
if len(m.Labels) > 0 {
|
||||||
|
for k, v := range m.Labels {
|
||||||
|
_ = k
|
||||||
|
_ = v
|
||||||
|
mapEntrySize := 1 + len(k) + sovNamespace(uint64(len(k))) + 1 + len(v) + sovNamespace(uint64(len(v)))
|
||||||
|
n += mapEntrySize + 1 + sovNamespace(uint64(mapEntrySize))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return n
|
||||||
|
}
|
||||||
|
|
||||||
|
func (m *NamespaceDelete) Size() (n int) {
|
||||||
|
var l int
|
||||||
|
_ = l
|
||||||
|
l = len(m.Name)
|
||||||
|
if l > 0 {
|
||||||
|
n += 1 + l + sovNamespace(uint64(l))
|
||||||
|
}
|
||||||
|
return n
|
||||||
|
}
|
||||||
|
|
||||||
|
func sovNamespace(x uint64) (n int) {
|
||||||
|
for {
|
||||||
|
n++
|
||||||
|
x >>= 7
|
||||||
|
if x == 0 {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return n
|
||||||
|
}
|
||||||
|
func sozNamespace(x uint64) (n int) {
|
||||||
|
return sovNamespace(uint64((x << 1) ^ uint64((int64(x) >> 63))))
|
||||||
|
}
|
||||||
|
func (this *NamespaceCreate) String() string {
|
||||||
|
if this == nil {
|
||||||
|
return "nil"
|
||||||
|
}
|
||||||
|
keysForLabels := make([]string, 0, len(this.Labels))
|
||||||
|
for k, _ := range this.Labels {
|
||||||
|
keysForLabels = append(keysForLabels, k)
|
||||||
|
}
|
||||||
|
github_com_gogo_protobuf_sortkeys.Strings(keysForLabels)
|
||||||
|
mapStringForLabels := "map[string]string{"
|
||||||
|
for _, k := range keysForLabels {
|
||||||
|
mapStringForLabels += fmt.Sprintf("%v: %v,", k, this.Labels[k])
|
||||||
|
}
|
||||||
|
mapStringForLabels += "}"
|
||||||
|
s := strings.Join([]string{`&NamespaceCreate{`,
|
||||||
|
`Name:` + fmt.Sprintf("%v", this.Name) + `,`,
|
||||||
|
`Labels:` + mapStringForLabels + `,`,
|
||||||
|
`}`,
|
||||||
|
}, "")
|
||||||
|
return s
|
||||||
|
}
|
||||||
|
func (this *NamespaceUpdate) String() string {
|
||||||
|
if this == nil {
|
||||||
|
return "nil"
|
||||||
|
}
|
||||||
|
keysForLabels := make([]string, 0, len(this.Labels))
|
||||||
|
for k, _ := range this.Labels {
|
||||||
|
keysForLabels = append(keysForLabels, k)
|
||||||
|
}
|
||||||
|
github_com_gogo_protobuf_sortkeys.Strings(keysForLabels)
|
||||||
|
mapStringForLabels := "map[string]string{"
|
||||||
|
for _, k := range keysForLabels {
|
||||||
|
mapStringForLabels += fmt.Sprintf("%v: %v,", k, this.Labels[k])
|
||||||
|
}
|
||||||
|
mapStringForLabels += "}"
|
||||||
|
s := strings.Join([]string{`&NamespaceUpdate{`,
|
||||||
|
`Name:` + fmt.Sprintf("%v", this.Name) + `,`,
|
||||||
|
`Labels:` + mapStringForLabels + `,`,
|
||||||
|
`}`,
|
||||||
|
}, "")
|
||||||
|
return s
|
||||||
|
}
|
||||||
|
func (this *NamespaceDelete) String() string {
|
||||||
|
if this == nil {
|
||||||
|
return "nil"
|
||||||
|
}
|
||||||
|
s := strings.Join([]string{`&NamespaceDelete{`,
|
||||||
|
`Name:` + fmt.Sprintf("%v", this.Name) + `,`,
|
||||||
|
`}`,
|
||||||
|
}, "")
|
||||||
|
return s
|
||||||
|
}
|
||||||
|
func valueToStringNamespace(v interface{}) string {
|
||||||
|
rv := reflect.ValueOf(v)
|
||||||
|
if rv.IsNil() {
|
||||||
|
return "nil"
|
||||||
|
}
|
||||||
|
pv := reflect.Indirect(rv).Interface()
|
||||||
|
return fmt.Sprintf("*%v", pv)
|
||||||
|
}
|
||||||
|
func (m *NamespaceCreate) Unmarshal(dAtA []byte) error {
|
||||||
|
l := len(dAtA)
|
||||||
|
iNdEx := 0
|
||||||
|
for iNdEx < l {
|
||||||
|
preIndex := iNdEx
|
||||||
|
var wire uint64
|
||||||
|
for shift := uint(0); ; shift += 7 {
|
||||||
|
if shift >= 64 {
|
||||||
|
return ErrIntOverflowNamespace
|
||||||
|
}
|
||||||
|
if iNdEx >= l {
|
||||||
|
return io.ErrUnexpectedEOF
|
||||||
|
}
|
||||||
|
b := dAtA[iNdEx]
|
||||||
|
iNdEx++
|
||||||
|
wire |= (uint64(b) & 0x7F) << shift
|
||||||
|
if b < 0x80 {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
fieldNum := int32(wire >> 3)
|
||||||
|
wireType := int(wire & 0x7)
|
||||||
|
if wireType == 4 {
|
||||||
|
return fmt.Errorf("proto: NamespaceCreate: wiretype end group for non-group")
|
||||||
|
}
|
||||||
|
if fieldNum <= 0 {
|
||||||
|
return fmt.Errorf("proto: NamespaceCreate: illegal tag %d (wire type %d)", fieldNum, wire)
|
||||||
|
}
|
||||||
|
switch fieldNum {
|
||||||
|
case 1:
|
||||||
|
if wireType != 2 {
|
||||||
|
return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType)
|
||||||
|
}
|
||||||
|
var stringLen uint64
|
||||||
|
for shift := uint(0); ; shift += 7 {
|
||||||
|
if shift >= 64 {
|
||||||
|
return ErrIntOverflowNamespace
|
||||||
|
}
|
||||||
|
if iNdEx >= l {
|
||||||
|
return io.ErrUnexpectedEOF
|
||||||
|
}
|
||||||
|
b := dAtA[iNdEx]
|
||||||
|
iNdEx++
|
||||||
|
stringLen |= (uint64(b) & 0x7F) << shift
|
||||||
|
if b < 0x80 {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
intStringLen := int(stringLen)
|
||||||
|
if intStringLen < 0 {
|
||||||
|
return ErrInvalidLengthNamespace
|
||||||
|
}
|
||||||
|
postIndex := iNdEx + intStringLen
|
||||||
|
if postIndex > l {
|
||||||
|
return io.ErrUnexpectedEOF
|
||||||
|
}
|
||||||
|
m.Name = string(dAtA[iNdEx:postIndex])
|
||||||
|
iNdEx = postIndex
|
||||||
|
case 2:
|
||||||
|
if wireType != 2 {
|
||||||
|
return fmt.Errorf("proto: wrong wireType = %d for field Labels", wireType)
|
||||||
|
}
|
||||||
|
var msglen int
|
||||||
|
for shift := uint(0); ; shift += 7 {
|
||||||
|
if shift >= 64 {
|
||||||
|
return ErrIntOverflowNamespace
|
||||||
|
}
|
||||||
|
if iNdEx >= l {
|
||||||
|
return io.ErrUnexpectedEOF
|
||||||
|
}
|
||||||
|
b := dAtA[iNdEx]
|
||||||
|
iNdEx++
|
||||||
|
msglen |= (int(b) & 0x7F) << shift
|
||||||
|
if b < 0x80 {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if msglen < 0 {
|
||||||
|
return ErrInvalidLengthNamespace
|
||||||
|
}
|
||||||
|
postIndex := iNdEx + msglen
|
||||||
|
if postIndex > l {
|
||||||
|
return io.ErrUnexpectedEOF
|
||||||
|
}
|
||||||
|
var keykey uint64
|
||||||
|
for shift := uint(0); ; shift += 7 {
|
||||||
|
if shift >= 64 {
|
||||||
|
return ErrIntOverflowNamespace
|
||||||
|
}
|
||||||
|
if iNdEx >= l {
|
||||||
|
return io.ErrUnexpectedEOF
|
||||||
|
}
|
||||||
|
b := dAtA[iNdEx]
|
||||||
|
iNdEx++
|
||||||
|
keykey |= (uint64(b) & 0x7F) << shift
|
||||||
|
if b < 0x80 {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
var stringLenmapkey uint64
|
||||||
|
for shift := uint(0); ; shift += 7 {
|
||||||
|
if shift >= 64 {
|
||||||
|
return ErrIntOverflowNamespace
|
||||||
|
}
|
||||||
|
if iNdEx >= l {
|
||||||
|
return io.ErrUnexpectedEOF
|
||||||
|
}
|
||||||
|
b := dAtA[iNdEx]
|
||||||
|
iNdEx++
|
||||||
|
stringLenmapkey |= (uint64(b) & 0x7F) << shift
|
||||||
|
if b < 0x80 {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
intStringLenmapkey := int(stringLenmapkey)
|
||||||
|
if intStringLenmapkey < 0 {
|
||||||
|
return ErrInvalidLengthNamespace
|
||||||
|
}
|
||||||
|
postStringIndexmapkey := iNdEx + intStringLenmapkey
|
||||||
|
if postStringIndexmapkey > l {
|
||||||
|
return io.ErrUnexpectedEOF
|
||||||
|
}
|
||||||
|
mapkey := string(dAtA[iNdEx:postStringIndexmapkey])
|
||||||
|
iNdEx = postStringIndexmapkey
|
||||||
|
if m.Labels == nil {
|
||||||
|
m.Labels = make(map[string]string)
|
||||||
|
}
|
||||||
|
if iNdEx < postIndex {
|
||||||
|
var valuekey uint64
|
||||||
|
for shift := uint(0); ; shift += 7 {
|
||||||
|
if shift >= 64 {
|
||||||
|
return ErrIntOverflowNamespace
|
||||||
|
}
|
||||||
|
if iNdEx >= l {
|
||||||
|
return io.ErrUnexpectedEOF
|
||||||
|
}
|
||||||
|
b := dAtA[iNdEx]
|
||||||
|
iNdEx++
|
||||||
|
valuekey |= (uint64(b) & 0x7F) << shift
|
||||||
|
if b < 0x80 {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
var stringLenmapvalue uint64
|
||||||
|
for shift := uint(0); ; shift += 7 {
|
||||||
|
if shift >= 64 {
|
||||||
|
return ErrIntOverflowNamespace
|
||||||
|
}
|
||||||
|
if iNdEx >= l {
|
||||||
|
return io.ErrUnexpectedEOF
|
||||||
|
}
|
||||||
|
b := dAtA[iNdEx]
|
||||||
|
iNdEx++
|
||||||
|
stringLenmapvalue |= (uint64(b) & 0x7F) << shift
|
||||||
|
if b < 0x80 {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
intStringLenmapvalue := int(stringLenmapvalue)
|
||||||
|
if intStringLenmapvalue < 0 {
|
||||||
|
return ErrInvalidLengthNamespace
|
||||||
|
}
|
||||||
|
postStringIndexmapvalue := iNdEx + intStringLenmapvalue
|
||||||
|
if postStringIndexmapvalue > l {
|
||||||
|
return io.ErrUnexpectedEOF
|
||||||
|
}
|
||||||
|
mapvalue := string(dAtA[iNdEx:postStringIndexmapvalue])
|
||||||
|
iNdEx = postStringIndexmapvalue
|
||||||
|
m.Labels[mapkey] = mapvalue
|
||||||
|
} else {
|
||||||
|
var mapvalue string
|
||||||
|
m.Labels[mapkey] = mapvalue
|
||||||
|
}
|
||||||
|
iNdEx = postIndex
|
||||||
|
default:
|
||||||
|
iNdEx = preIndex
|
||||||
|
skippy, err := skipNamespace(dAtA[iNdEx:])
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
if skippy < 0 {
|
||||||
|
return ErrInvalidLengthNamespace
|
||||||
|
}
|
||||||
|
if (iNdEx + skippy) > l {
|
||||||
|
return io.ErrUnexpectedEOF
|
||||||
|
}
|
||||||
|
iNdEx += skippy
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if iNdEx > l {
|
||||||
|
return io.ErrUnexpectedEOF
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
func (m *NamespaceUpdate) Unmarshal(dAtA []byte) error {
|
||||||
|
l := len(dAtA)
|
||||||
|
iNdEx := 0
|
||||||
|
for iNdEx < l {
|
||||||
|
preIndex := iNdEx
|
||||||
|
var wire uint64
|
||||||
|
for shift := uint(0); ; shift += 7 {
|
||||||
|
if shift >= 64 {
|
||||||
|
return ErrIntOverflowNamespace
|
||||||
|
}
|
||||||
|
if iNdEx >= l {
|
||||||
|
return io.ErrUnexpectedEOF
|
||||||
|
}
|
||||||
|
b := dAtA[iNdEx]
|
||||||
|
iNdEx++
|
||||||
|
wire |= (uint64(b) & 0x7F) << shift
|
||||||
|
if b < 0x80 {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
fieldNum := int32(wire >> 3)
|
||||||
|
wireType := int(wire & 0x7)
|
||||||
|
if wireType == 4 {
|
||||||
|
return fmt.Errorf("proto: NamespaceUpdate: wiretype end group for non-group")
|
||||||
|
}
|
||||||
|
if fieldNum <= 0 {
|
||||||
|
return fmt.Errorf("proto: NamespaceUpdate: illegal tag %d (wire type %d)", fieldNum, wire)
|
||||||
|
}
|
||||||
|
switch fieldNum {
|
||||||
|
case 1:
|
||||||
|
if wireType != 2 {
|
||||||
|
return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType)
|
||||||
|
}
|
||||||
|
var stringLen uint64
|
||||||
|
for shift := uint(0); ; shift += 7 {
|
||||||
|
if shift >= 64 {
|
||||||
|
return ErrIntOverflowNamespace
|
||||||
|
}
|
||||||
|
if iNdEx >= l {
|
||||||
|
return io.ErrUnexpectedEOF
|
||||||
|
}
|
||||||
|
b := dAtA[iNdEx]
|
||||||
|
iNdEx++
|
||||||
|
stringLen |= (uint64(b) & 0x7F) << shift
|
||||||
|
if b < 0x80 {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
intStringLen := int(stringLen)
|
||||||
|
if intStringLen < 0 {
|
||||||
|
return ErrInvalidLengthNamespace
|
||||||
|
}
|
||||||
|
postIndex := iNdEx + intStringLen
|
||||||
|
if postIndex > l {
|
||||||
|
return io.ErrUnexpectedEOF
|
||||||
|
}
|
||||||
|
m.Name = string(dAtA[iNdEx:postIndex])
|
||||||
|
iNdEx = postIndex
|
||||||
|
case 2:
|
||||||
|
if wireType != 2 {
|
||||||
|
return fmt.Errorf("proto: wrong wireType = %d for field Labels", wireType)
|
||||||
|
}
|
||||||
|
var msglen int
|
||||||
|
for shift := uint(0); ; shift += 7 {
|
||||||
|
if shift >= 64 {
|
||||||
|
return ErrIntOverflowNamespace
|
||||||
|
}
|
||||||
|
if iNdEx >= l {
|
||||||
|
return io.ErrUnexpectedEOF
|
||||||
|
}
|
||||||
|
b := dAtA[iNdEx]
|
||||||
|
iNdEx++
|
||||||
|
msglen |= (int(b) & 0x7F) << shift
|
||||||
|
if b < 0x80 {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if msglen < 0 {
|
||||||
|
return ErrInvalidLengthNamespace
|
||||||
|
}
|
||||||
|
postIndex := iNdEx + msglen
|
||||||
|
if postIndex > l {
|
||||||
|
return io.ErrUnexpectedEOF
|
||||||
|
}
|
||||||
|
var keykey uint64
|
||||||
|
for shift := uint(0); ; shift += 7 {
|
||||||
|
if shift >= 64 {
|
||||||
|
return ErrIntOverflowNamespace
|
||||||
|
}
|
||||||
|
if iNdEx >= l {
|
||||||
|
return io.ErrUnexpectedEOF
|
||||||
|
}
|
||||||
|
b := dAtA[iNdEx]
|
||||||
|
iNdEx++
|
||||||
|
keykey |= (uint64(b) & 0x7F) << shift
|
||||||
|
if b < 0x80 {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
var stringLenmapkey uint64
|
||||||
|
for shift := uint(0); ; shift += 7 {
|
||||||
|
if shift >= 64 {
|
||||||
|
return ErrIntOverflowNamespace
|
||||||
|
}
|
||||||
|
if iNdEx >= l {
|
||||||
|
return io.ErrUnexpectedEOF
|
||||||
|
}
|
||||||
|
b := dAtA[iNdEx]
|
||||||
|
iNdEx++
|
||||||
|
stringLenmapkey |= (uint64(b) & 0x7F) << shift
|
||||||
|
if b < 0x80 {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
intStringLenmapkey := int(stringLenmapkey)
|
||||||
|
if intStringLenmapkey < 0 {
|
||||||
|
return ErrInvalidLengthNamespace
|
||||||
|
}
|
||||||
|
postStringIndexmapkey := iNdEx + intStringLenmapkey
|
||||||
|
if postStringIndexmapkey > l {
|
||||||
|
return io.ErrUnexpectedEOF
|
||||||
|
}
|
||||||
|
mapkey := string(dAtA[iNdEx:postStringIndexmapkey])
|
||||||
|
iNdEx = postStringIndexmapkey
|
||||||
|
if m.Labels == nil {
|
||||||
|
m.Labels = make(map[string]string)
|
||||||
|
}
|
||||||
|
if iNdEx < postIndex {
|
||||||
|
var valuekey uint64
|
||||||
|
for shift := uint(0); ; shift += 7 {
|
||||||
|
if shift >= 64 {
|
||||||
|
return ErrIntOverflowNamespace
|
||||||
|
}
|
||||||
|
if iNdEx >= l {
|
||||||
|
return io.ErrUnexpectedEOF
|
||||||
|
}
|
||||||
|
b := dAtA[iNdEx]
|
||||||
|
iNdEx++
|
||||||
|
valuekey |= (uint64(b) & 0x7F) << shift
|
||||||
|
if b < 0x80 {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
var stringLenmapvalue uint64
|
||||||
|
for shift := uint(0); ; shift += 7 {
|
||||||
|
if shift >= 64 {
|
||||||
|
return ErrIntOverflowNamespace
|
||||||
|
}
|
||||||
|
if iNdEx >= l {
|
||||||
|
return io.ErrUnexpectedEOF
|
||||||
|
}
|
||||||
|
b := dAtA[iNdEx]
|
||||||
|
iNdEx++
|
||||||
|
stringLenmapvalue |= (uint64(b) & 0x7F) << shift
|
||||||
|
if b < 0x80 {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
intStringLenmapvalue := int(stringLenmapvalue)
|
||||||
|
if intStringLenmapvalue < 0 {
|
||||||
|
return ErrInvalidLengthNamespace
|
||||||
|
}
|
||||||
|
postStringIndexmapvalue := iNdEx + intStringLenmapvalue
|
||||||
|
if postStringIndexmapvalue > l {
|
||||||
|
return io.ErrUnexpectedEOF
|
||||||
|
}
|
||||||
|
mapvalue := string(dAtA[iNdEx:postStringIndexmapvalue])
|
||||||
|
iNdEx = postStringIndexmapvalue
|
||||||
|
m.Labels[mapkey] = mapvalue
|
||||||
|
} else {
|
||||||
|
var mapvalue string
|
||||||
|
m.Labels[mapkey] = mapvalue
|
||||||
|
}
|
||||||
|
iNdEx = postIndex
|
||||||
|
default:
|
||||||
|
iNdEx = preIndex
|
||||||
|
skippy, err := skipNamespace(dAtA[iNdEx:])
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
if skippy < 0 {
|
||||||
|
return ErrInvalidLengthNamespace
|
||||||
|
}
|
||||||
|
if (iNdEx + skippy) > l {
|
||||||
|
return io.ErrUnexpectedEOF
|
||||||
|
}
|
||||||
|
iNdEx += skippy
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if iNdEx > l {
|
||||||
|
return io.ErrUnexpectedEOF
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
func (m *NamespaceDelete) Unmarshal(dAtA []byte) error {
|
||||||
|
l := len(dAtA)
|
||||||
|
iNdEx := 0
|
||||||
|
for iNdEx < l {
|
||||||
|
preIndex := iNdEx
|
||||||
|
var wire uint64
|
||||||
|
for shift := uint(0); ; shift += 7 {
|
||||||
|
if shift >= 64 {
|
||||||
|
return ErrIntOverflowNamespace
|
||||||
|
}
|
||||||
|
if iNdEx >= l {
|
||||||
|
return io.ErrUnexpectedEOF
|
||||||
|
}
|
||||||
|
b := dAtA[iNdEx]
|
||||||
|
iNdEx++
|
||||||
|
wire |= (uint64(b) & 0x7F) << shift
|
||||||
|
if b < 0x80 {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
fieldNum := int32(wire >> 3)
|
||||||
|
wireType := int(wire & 0x7)
|
||||||
|
if wireType == 4 {
|
||||||
|
return fmt.Errorf("proto: NamespaceDelete: wiretype end group for non-group")
|
||||||
|
}
|
||||||
|
if fieldNum <= 0 {
|
||||||
|
return fmt.Errorf("proto: NamespaceDelete: illegal tag %d (wire type %d)", fieldNum, wire)
|
||||||
|
}
|
||||||
|
switch fieldNum {
|
||||||
|
case 1:
|
||||||
|
if wireType != 2 {
|
||||||
|
return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType)
|
||||||
|
}
|
||||||
|
var stringLen uint64
|
||||||
|
for shift := uint(0); ; shift += 7 {
|
||||||
|
if shift >= 64 {
|
||||||
|
return ErrIntOverflowNamespace
|
||||||
|
}
|
||||||
|
if iNdEx >= l {
|
||||||
|
return io.ErrUnexpectedEOF
|
||||||
|
}
|
||||||
|
b := dAtA[iNdEx]
|
||||||
|
iNdEx++
|
||||||
|
stringLen |= (uint64(b) & 0x7F) << shift
|
||||||
|
if b < 0x80 {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
intStringLen := int(stringLen)
|
||||||
|
if intStringLen < 0 {
|
||||||
|
return ErrInvalidLengthNamespace
|
||||||
|
}
|
||||||
|
postIndex := iNdEx + intStringLen
|
||||||
|
if postIndex > l {
|
||||||
|
return io.ErrUnexpectedEOF
|
||||||
|
}
|
||||||
|
m.Name = string(dAtA[iNdEx:postIndex])
|
||||||
|
iNdEx = postIndex
|
||||||
|
default:
|
||||||
|
iNdEx = preIndex
|
||||||
|
skippy, err := skipNamespace(dAtA[iNdEx:])
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
if skippy < 0 {
|
||||||
|
return ErrInvalidLengthNamespace
|
||||||
|
}
|
||||||
|
if (iNdEx + skippy) > l {
|
||||||
|
return io.ErrUnexpectedEOF
|
||||||
|
}
|
||||||
|
iNdEx += skippy
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if iNdEx > l {
|
||||||
|
return io.ErrUnexpectedEOF
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
func skipNamespace(dAtA []byte) (n int, err error) {
|
||||||
|
l := len(dAtA)
|
||||||
|
iNdEx := 0
|
||||||
|
for iNdEx < l {
|
||||||
|
var wire uint64
|
||||||
|
for shift := uint(0); ; shift += 7 {
|
||||||
|
if shift >= 64 {
|
||||||
|
return 0, ErrIntOverflowNamespace
|
||||||
|
}
|
||||||
|
if iNdEx >= l {
|
||||||
|
return 0, io.ErrUnexpectedEOF
|
||||||
|
}
|
||||||
|
b := dAtA[iNdEx]
|
||||||
|
iNdEx++
|
||||||
|
wire |= (uint64(b) & 0x7F) << shift
|
||||||
|
if b < 0x80 {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
wireType := int(wire & 0x7)
|
||||||
|
switch wireType {
|
||||||
|
case 0:
|
||||||
|
for shift := uint(0); ; shift += 7 {
|
||||||
|
if shift >= 64 {
|
||||||
|
return 0, ErrIntOverflowNamespace
|
||||||
|
}
|
||||||
|
if iNdEx >= l {
|
||||||
|
return 0, io.ErrUnexpectedEOF
|
||||||
|
}
|
||||||
|
iNdEx++
|
||||||
|
if dAtA[iNdEx-1] < 0x80 {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return iNdEx, nil
|
||||||
|
case 1:
|
||||||
|
iNdEx += 8
|
||||||
|
return iNdEx, nil
|
||||||
|
case 2:
|
||||||
|
var length int
|
||||||
|
for shift := uint(0); ; shift += 7 {
|
||||||
|
if shift >= 64 {
|
||||||
|
return 0, ErrIntOverflowNamespace
|
||||||
|
}
|
||||||
|
if iNdEx >= l {
|
||||||
|
return 0, io.ErrUnexpectedEOF
|
||||||
|
}
|
||||||
|
b := dAtA[iNdEx]
|
||||||
|
iNdEx++
|
||||||
|
length |= (int(b) & 0x7F) << shift
|
||||||
|
if b < 0x80 {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
iNdEx += length
|
||||||
|
if length < 0 {
|
||||||
|
return 0, ErrInvalidLengthNamespace
|
||||||
|
}
|
||||||
|
return iNdEx, nil
|
||||||
|
case 3:
|
||||||
|
for {
|
||||||
|
var innerWire uint64
|
||||||
|
var start int = iNdEx
|
||||||
|
for shift := uint(0); ; shift += 7 {
|
||||||
|
if shift >= 64 {
|
||||||
|
return 0, ErrIntOverflowNamespace
|
||||||
|
}
|
||||||
|
if iNdEx >= l {
|
||||||
|
return 0, io.ErrUnexpectedEOF
|
||||||
|
}
|
||||||
|
b := dAtA[iNdEx]
|
||||||
|
iNdEx++
|
||||||
|
innerWire |= (uint64(b) & 0x7F) << shift
|
||||||
|
if b < 0x80 {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
innerWireType := int(innerWire & 0x7)
|
||||||
|
if innerWireType == 4 {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
next, err := skipNamespace(dAtA[start:])
|
||||||
|
if err != nil {
|
||||||
|
return 0, err
|
||||||
|
}
|
||||||
|
iNdEx = start + next
|
||||||
|
}
|
||||||
|
return iNdEx, nil
|
||||||
|
case 4:
|
||||||
|
return iNdEx, nil
|
||||||
|
case 5:
|
||||||
|
iNdEx += 4
|
||||||
|
return iNdEx, nil
|
||||||
|
default:
|
||||||
|
return 0, fmt.Errorf("proto: illegal wireType %d", wireType)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
panic("unreachable")
|
||||||
|
}
|
||||||
|
|
||||||
|
var (
|
||||||
|
ErrInvalidLengthNamespace = fmt.Errorf("proto: negative length found during unmarshaling")
|
||||||
|
ErrIntOverflowNamespace = fmt.Errorf("proto: integer overflow")
|
||||||
|
)
|
||||||
|
|
||||||
|
func init() {
|
||||||
|
proto.RegisterFile("github.com/containerd/containerd/api/services/events/v1/namespace.proto", fileDescriptorNamespace)
|
||||||
|
}
|
||||||
|
|
||||||
|
var fileDescriptorNamespace = []byte{
|
||||||
|
// 277 bytes of a gzipped FileDescriptorProto
|
||||||
|
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x72, 0x4f, 0xcf, 0x2c, 0xc9,
|
||||||
|
0x28, 0x4d, 0xd2, 0x4b, 0xce, 0xcf, 0xd5, 0x4f, 0xce, 0xcf, 0x2b, 0x49, 0xcc, 0xcc, 0x4b, 0x2d,
|
||||||
|
0x4a, 0x41, 0x66, 0x26, 0x16, 0x64, 0xea, 0x17, 0xa7, 0x16, 0x95, 0x65, 0x26, 0xa7, 0x16, 0xeb,
|
||||||
|
0xa7, 0x96, 0xa5, 0xe6, 0x95, 0x14, 0xeb, 0x97, 0x19, 0xea, 0xe7, 0x25, 0xe6, 0xa6, 0x16, 0x17,
|
||||||
|
0x24, 0x26, 0xa7, 0xea, 0x15, 0x14, 0xe5, 0x97, 0xe4, 0x0b, 0xc9, 0x22, 0xb4, 0xe8, 0xc1, 0x94,
|
||||||
|
0xeb, 0x41, 0x94, 0xeb, 0x95, 0x19, 0x4a, 0x89, 0xa4, 0xe7, 0xa7, 0xe7, 0x83, 0x55, 0xea, 0x83,
|
||||||
|
0x58, 0x10, 0x4d, 0x4a, 0x5b, 0x18, 0xb9, 0xf8, 0xfd, 0x60, 0x06, 0x39, 0x17, 0xa5, 0x26, 0x96,
|
||||||
|
0xa4, 0x0a, 0x09, 0x71, 0xb1, 0x80, 0xcc, 0x96, 0x60, 0x54, 0x60, 0xd4, 0xe0, 0x0c, 0x02, 0xb3,
|
||||||
|
0x85, 0x82, 0xb8, 0xd8, 0x72, 0x12, 0x93, 0x52, 0x73, 0x8a, 0x25, 0x98, 0x14, 0x98, 0x35, 0xb8,
|
||||||
|
0x8d, 0xac, 0xf4, 0xf0, 0xda, 0xa6, 0x87, 0x66, 0xa6, 0x9e, 0x0f, 0x58, 0xb3, 0x6b, 0x5e, 0x49,
|
||||||
|
0x51, 0x65, 0x10, 0xd4, 0x24, 0x29, 0x4b, 0x2e, 0x6e, 0x24, 0x61, 0x21, 0x01, 0x2e, 0xe6, 0xec,
|
||||||
|
0xd4, 0x4a, 0xa8, 0xad, 0x20, 0xa6, 0x90, 0x08, 0x17, 0x6b, 0x59, 0x62, 0x4e, 0x69, 0xaa, 0x04,
|
||||||
|
0x13, 0x58, 0x0c, 0xc2, 0xb1, 0x62, 0xb2, 0x60, 0x44, 0x75, 0x76, 0x68, 0x41, 0x0a, 0xd5, 0x9d,
|
||||||
|
0x0d, 0x31, 0x93, 0xda, 0xce, 0x56, 0x45, 0x72, 0xb5, 0x4b, 0x6a, 0x4e, 0x2a, 0x76, 0x57, 0x3b,
|
||||||
|
0x45, 0x9c, 0x78, 0x28, 0xc7, 0x70, 0xe3, 0xa1, 0x1c, 0x43, 0xc3, 0x23, 0x39, 0xc6, 0x13, 0x8f,
|
||||||
|
0xe4, 0x18, 0x2f, 0x3c, 0x92, 0x63, 0x7c, 0xf0, 0x48, 0x8e, 0x31, 0xca, 0x8e, 0xcc, 0xc4, 0x62,
|
||||||
|
0x0d, 0x61, 0x25, 0xb1, 0x81, 0x63, 0xdd, 0x18, 0x10, 0x00, 0x00, 0xff, 0xff, 0xbe, 0xf0, 0x68,
|
||||||
|
0xa6, 0x75, 0x02, 0x00, 0x00,
|
||||||
|
}
|
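The generated Unmarshal and skipNamespace functions above all lean on the same protobuf varint loop: each byte contributes its low seven bits, least-significant group first, and the first byte with the high bit clear ends the value; the decoded key then splits into a field number (wire >> 3) and a wire type (wire & 0x7). The snippet below is a minimal, self-contained sketch of that decoding step, written for illustration only; it is not part of the vendored file, and the helper name decodeUvarint is made up.

package main

import (
    "errors"
    "fmt"
)

// decodeUvarint mirrors the loop used by the generated Unmarshal/skip
// functions: each byte contributes its low 7 bits, least-significant group
// first, and a clear high bit (b < 0x80) terminates the value.
func decodeUvarint(data []byte) (value uint64, n int, err error) {
    for shift := uint(0); ; shift += 7 {
        if shift >= 64 {
            return 0, 0, errors.New("integer overflow")
        }
        if n >= len(data) {
            return 0, 0, errors.New("unexpected EOF")
        }
        b := data[n]
        n++
        value |= (uint64(b) & 0x7F) << shift
        if b < 0x80 {
            break
        }
    }
    return value, n, nil
}

func main() {
    // 0xAC 0x02 encodes 300: (0xAC & 0x7F) | (0x02 << 7) = 44 + 256.
    v, n, err := decodeUvarint([]byte{0xAC, 0x02})
    fmt.Println(v, n, err) // 300 2 <nil>
}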
vendor/github.com/containerd/containerd/api/services/events/v1/namespace.proto (new file, 21 lines, generated, vendored)
@ -0,0 +1,21 @@
syntax = "proto3";

package containerd.services.events.v1;

import "gogoproto/gogo.proto";

option go_package = "github.com/containerd/containerd/api/services/events/v1;events";

message NamespaceCreate {
    string name = 1;
    map<string, string> labels = 2;
}

message NamespaceUpdate {
    string name = 1;
    map<string, string> labels = 2;
}

message NamespaceDelete {
    string name = 1;
}
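For orientation, here is a sketch of how the generated Go bindings for these namespace event messages might be used. It assumes the vendored package is imported under the go_package path declared above and that the gogo-generated types expose Marshal and Unmarshal methods, as the surrounding .pb.go files do; the namespace name and label are invented, and the snippet is not part of the commit.

package main

import (
    "fmt"

    events "github.com/containerd/containerd/api/services/events/v1"
)

func main() {
    // Build a NamespaceCreate event as defined in namespace.proto above.
    ev := &events.NamespaceCreate{
        Name:   "k8s.io",
        Labels: map[string]string{"owner": "cri-containerd"},
    }

    // The gogo-generated code provides Marshal/Unmarshal on the message itself.
    data, err := ev.Marshal()
    if err != nil {
        panic(err)
    }

    var decoded events.NamespaceCreate
    if err := decoded.Unmarshal(data); err != nil {
        panic(err)
    }
    fmt.Println(decoded.Name, decoded.Labels["owner"])
}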
vendor/github.com/containerd/containerd/api/services/events/v1/snapshot.pb.go (new file, 674 lines, generated, vendored)
@ -0,0 +1,674 @@
|
|||||||
|
// Code generated by protoc-gen-gogo.
|
||||||
|
// source: github.com/containerd/containerd/api/services/events/v1/snapshot.proto
|
||||||
|
// DO NOT EDIT!
|
||||||
|
|
||||||
|
package events
|
||||||
|
|
||||||
|
import proto "github.com/gogo/protobuf/proto"
|
||||||
|
import fmt "fmt"
|
||||||
|
import math "math"
|
||||||
|
|
||||||
|
import strings "strings"
|
||||||
|
import reflect "reflect"
|
||||||
|
|
||||||
|
import io "io"
|
||||||
|
|
||||||
|
// Reference imports to suppress errors if they are not otherwise used.
|
||||||
|
var _ = proto.Marshal
|
||||||
|
var _ = fmt.Errorf
|
||||||
|
var _ = math.Inf
|
||||||
|
|
||||||
|
type SnapshotPrepare struct {
|
||||||
|
Key string `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"`
|
||||||
|
Parent string `protobuf:"bytes,2,opt,name=parent,proto3" json:"parent,omitempty"`
|
||||||
|
}
|
||||||
|
|
||||||
|
func (m *SnapshotPrepare) Reset() { *m = SnapshotPrepare{} }
|
||||||
|
func (*SnapshotPrepare) ProtoMessage() {}
|
||||||
|
func (*SnapshotPrepare) Descriptor() ([]byte, []int) { return fileDescriptorSnapshot, []int{0} }
|
||||||
|
|
||||||
|
type SnapshotCommit struct {
|
||||||
|
Key string `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"`
|
||||||
|
Name string `protobuf:"bytes,2,opt,name=name,proto3" json:"name,omitempty"`
|
||||||
|
}
|
||||||
|
|
||||||
|
func (m *SnapshotCommit) Reset() { *m = SnapshotCommit{} }
|
||||||
|
func (*SnapshotCommit) ProtoMessage() {}
|
||||||
|
func (*SnapshotCommit) Descriptor() ([]byte, []int) { return fileDescriptorSnapshot, []int{1} }
|
||||||
|
|
||||||
|
type SnapshotRemove struct {
|
||||||
|
Key string `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"`
|
||||||
|
}
|
||||||
|
|
||||||
|
func (m *SnapshotRemove) Reset() { *m = SnapshotRemove{} }
|
||||||
|
func (*SnapshotRemove) ProtoMessage() {}
|
||||||
|
func (*SnapshotRemove) Descriptor() ([]byte, []int) { return fileDescriptorSnapshot, []int{2} }
|
||||||
|
|
||||||
|
func init() {
|
||||||
|
proto.RegisterType((*SnapshotPrepare)(nil), "containerd.services.events.v1.SnapshotPrepare")
|
||||||
|
proto.RegisterType((*SnapshotCommit)(nil), "containerd.services.events.v1.SnapshotCommit")
|
||||||
|
proto.RegisterType((*SnapshotRemove)(nil), "containerd.services.events.v1.SnapshotRemove")
|
||||||
|
}
|
||||||
|
func (m *SnapshotPrepare) Marshal() (dAtA []byte, err error) {
|
||||||
|
size := m.Size()
|
||||||
|
dAtA = make([]byte, size)
|
||||||
|
n, err := m.MarshalTo(dAtA)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
return dAtA[:n], nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (m *SnapshotPrepare) MarshalTo(dAtA []byte) (int, error) {
|
||||||
|
var i int
|
||||||
|
_ = i
|
||||||
|
var l int
|
||||||
|
_ = l
|
||||||
|
if len(m.Key) > 0 {
|
||||||
|
dAtA[i] = 0xa
|
||||||
|
i++
|
||||||
|
i = encodeVarintSnapshot(dAtA, i, uint64(len(m.Key)))
|
||||||
|
i += copy(dAtA[i:], m.Key)
|
||||||
|
}
|
||||||
|
if len(m.Parent) > 0 {
|
||||||
|
dAtA[i] = 0x12
|
||||||
|
i++
|
||||||
|
i = encodeVarintSnapshot(dAtA, i, uint64(len(m.Parent)))
|
||||||
|
i += copy(dAtA[i:], m.Parent)
|
||||||
|
}
|
||||||
|
return i, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (m *SnapshotCommit) Marshal() (dAtA []byte, err error) {
|
||||||
|
size := m.Size()
|
||||||
|
dAtA = make([]byte, size)
|
||||||
|
n, err := m.MarshalTo(dAtA)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
return dAtA[:n], nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (m *SnapshotCommit) MarshalTo(dAtA []byte) (int, error) {
|
||||||
|
var i int
|
||||||
|
_ = i
|
||||||
|
var l int
|
||||||
|
_ = l
|
||||||
|
if len(m.Key) > 0 {
|
||||||
|
dAtA[i] = 0xa
|
||||||
|
i++
|
||||||
|
i = encodeVarintSnapshot(dAtA, i, uint64(len(m.Key)))
|
||||||
|
i += copy(dAtA[i:], m.Key)
|
||||||
|
}
|
||||||
|
if len(m.Name) > 0 {
|
||||||
|
dAtA[i] = 0x12
|
||||||
|
i++
|
||||||
|
i = encodeVarintSnapshot(dAtA, i, uint64(len(m.Name)))
|
||||||
|
i += copy(dAtA[i:], m.Name)
|
||||||
|
}
|
||||||
|
return i, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (m *SnapshotRemove) Marshal() (dAtA []byte, err error) {
|
||||||
|
size := m.Size()
|
||||||
|
dAtA = make([]byte, size)
|
||||||
|
n, err := m.MarshalTo(dAtA)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
return dAtA[:n], nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (m *SnapshotRemove) MarshalTo(dAtA []byte) (int, error) {
|
||||||
|
var i int
|
||||||
|
_ = i
|
||||||
|
var l int
|
||||||
|
_ = l
|
||||||
|
if len(m.Key) > 0 {
|
||||||
|
dAtA[i] = 0xa
|
||||||
|
i++
|
||||||
|
i = encodeVarintSnapshot(dAtA, i, uint64(len(m.Key)))
|
||||||
|
i += copy(dAtA[i:], m.Key)
|
||||||
|
}
|
||||||
|
return i, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func encodeFixed64Snapshot(dAtA []byte, offset int, v uint64) int {
|
||||||
|
dAtA[offset] = uint8(v)
|
||||||
|
dAtA[offset+1] = uint8(v >> 8)
|
||||||
|
dAtA[offset+2] = uint8(v >> 16)
|
||||||
|
dAtA[offset+3] = uint8(v >> 24)
|
||||||
|
dAtA[offset+4] = uint8(v >> 32)
|
||||||
|
dAtA[offset+5] = uint8(v >> 40)
|
||||||
|
dAtA[offset+6] = uint8(v >> 48)
|
||||||
|
dAtA[offset+7] = uint8(v >> 56)
|
||||||
|
return offset + 8
|
||||||
|
}
|
||||||
|
func encodeFixed32Snapshot(dAtA []byte, offset int, v uint32) int {
|
||||||
|
dAtA[offset] = uint8(v)
|
||||||
|
dAtA[offset+1] = uint8(v >> 8)
|
||||||
|
dAtA[offset+2] = uint8(v >> 16)
|
||||||
|
dAtA[offset+3] = uint8(v >> 24)
|
||||||
|
return offset + 4
|
||||||
|
}
|
||||||
|
func encodeVarintSnapshot(dAtA []byte, offset int, v uint64) int {
|
||||||
|
for v >= 1<<7 {
|
||||||
|
dAtA[offset] = uint8(v&0x7f | 0x80)
|
||||||
|
v >>= 7
|
||||||
|
offset++
|
||||||
|
}
|
||||||
|
dAtA[offset] = uint8(v)
|
||||||
|
return offset + 1
|
||||||
|
}
|
||||||
|
func (m *SnapshotPrepare) Size() (n int) {
|
||||||
|
var l int
|
||||||
|
_ = l
|
||||||
|
l = len(m.Key)
|
||||||
|
if l > 0 {
|
||||||
|
n += 1 + l + sovSnapshot(uint64(l))
|
||||||
|
}
|
||||||
|
l = len(m.Parent)
|
||||||
|
if l > 0 {
|
||||||
|
n += 1 + l + sovSnapshot(uint64(l))
|
||||||
|
}
|
||||||
|
return n
|
||||||
|
}
|
||||||
|
|
||||||
|
func (m *SnapshotCommit) Size() (n int) {
|
||||||
|
var l int
|
||||||
|
_ = l
|
||||||
|
l = len(m.Key)
|
||||||
|
if l > 0 {
|
||||||
|
n += 1 + l + sovSnapshot(uint64(l))
|
||||||
|
}
|
||||||
|
l = len(m.Name)
|
||||||
|
if l > 0 {
|
||||||
|
n += 1 + l + sovSnapshot(uint64(l))
|
||||||
|
}
|
||||||
|
return n
|
||||||
|
}
|
||||||
|
|
||||||
|
func (m *SnapshotRemove) Size() (n int) {
|
||||||
|
var l int
|
||||||
|
_ = l
|
||||||
|
l = len(m.Key)
|
||||||
|
if l > 0 {
|
||||||
|
n += 1 + l + sovSnapshot(uint64(l))
|
||||||
|
}
|
||||||
|
return n
|
||||||
|
}
|
||||||
|
|
||||||
|
func sovSnapshot(x uint64) (n int) {
|
||||||
|
for {
|
||||||
|
n++
|
||||||
|
x >>= 7
|
||||||
|
if x == 0 {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return n
|
||||||
|
}
|
||||||
|
func sozSnapshot(x uint64) (n int) {
|
||||||
|
return sovSnapshot(uint64((x << 1) ^ uint64((int64(x) >> 63))))
|
||||||
|
}
|
||||||
|
func (this *SnapshotPrepare) String() string {
|
||||||
|
if this == nil {
|
||||||
|
return "nil"
|
||||||
|
}
|
||||||
|
s := strings.Join([]string{`&SnapshotPrepare{`,
|
||||||
|
`Key:` + fmt.Sprintf("%v", this.Key) + `,`,
|
||||||
|
`Parent:` + fmt.Sprintf("%v", this.Parent) + `,`,
|
||||||
|
`}`,
|
||||||
|
}, "")
|
||||||
|
return s
|
||||||
|
}
|
||||||
|
func (this *SnapshotCommit) String() string {
|
||||||
|
if this == nil {
|
||||||
|
return "nil"
|
||||||
|
}
|
||||||
|
s := strings.Join([]string{`&SnapshotCommit{`,
|
||||||
|
`Key:` + fmt.Sprintf("%v", this.Key) + `,`,
|
||||||
|
`Name:` + fmt.Sprintf("%v", this.Name) + `,`,
|
||||||
|
`}`,
|
||||||
|
}, "")
|
||||||
|
return s
|
||||||
|
}
|
||||||
|
func (this *SnapshotRemove) String() string {
|
||||||
|
if this == nil {
|
||||||
|
return "nil"
|
||||||
|
}
|
||||||
|
s := strings.Join([]string{`&SnapshotRemove{`,
|
||||||
|
`Key:` + fmt.Sprintf("%v", this.Key) + `,`,
|
||||||
|
`}`,
|
||||||
|
}, "")
|
||||||
|
return s
|
||||||
|
}
|
||||||
|
func valueToStringSnapshot(v interface{}) string {
|
||||||
|
rv := reflect.ValueOf(v)
|
||||||
|
if rv.IsNil() {
|
||||||
|
return "nil"
|
||||||
|
}
|
||||||
|
pv := reflect.Indirect(rv).Interface()
|
||||||
|
return fmt.Sprintf("*%v", pv)
|
||||||
|
}
|
||||||
|
func (m *SnapshotPrepare) Unmarshal(dAtA []byte) error {
|
||||||
|
l := len(dAtA)
|
||||||
|
iNdEx := 0
|
||||||
|
for iNdEx < l {
|
||||||
|
preIndex := iNdEx
|
||||||
|
var wire uint64
|
||||||
|
for shift := uint(0); ; shift += 7 {
|
||||||
|
if shift >= 64 {
|
||||||
|
return ErrIntOverflowSnapshot
|
||||||
|
}
|
||||||
|
if iNdEx >= l {
|
||||||
|
return io.ErrUnexpectedEOF
|
||||||
|
}
|
||||||
|
b := dAtA[iNdEx]
|
||||||
|
iNdEx++
|
||||||
|
wire |= (uint64(b) & 0x7F) << shift
|
||||||
|
if b < 0x80 {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
fieldNum := int32(wire >> 3)
|
||||||
|
wireType := int(wire & 0x7)
|
||||||
|
if wireType == 4 {
|
||||||
|
return fmt.Errorf("proto: SnapshotPrepare: wiretype end group for non-group")
|
||||||
|
}
|
||||||
|
if fieldNum <= 0 {
|
||||||
|
return fmt.Errorf("proto: SnapshotPrepare: illegal tag %d (wire type %d)", fieldNum, wire)
|
||||||
|
}
|
||||||
|
switch fieldNum {
|
||||||
|
case 1:
|
||||||
|
if wireType != 2 {
|
||||||
|
return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType)
|
||||||
|
}
|
||||||
|
var stringLen uint64
|
||||||
|
for shift := uint(0); ; shift += 7 {
|
||||||
|
if shift >= 64 {
|
||||||
|
return ErrIntOverflowSnapshot
|
||||||
|
}
|
||||||
|
if iNdEx >= l {
|
||||||
|
return io.ErrUnexpectedEOF
|
||||||
|
}
|
||||||
|
b := dAtA[iNdEx]
|
||||||
|
iNdEx++
|
||||||
|
stringLen |= (uint64(b) & 0x7F) << shift
|
||||||
|
if b < 0x80 {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
intStringLen := int(stringLen)
|
||||||
|
if intStringLen < 0 {
|
||||||
|
return ErrInvalidLengthSnapshot
|
||||||
|
}
|
||||||
|
postIndex := iNdEx + intStringLen
|
||||||
|
if postIndex > l {
|
||||||
|
return io.ErrUnexpectedEOF
|
||||||
|
}
|
||||||
|
m.Key = string(dAtA[iNdEx:postIndex])
|
||||||
|
iNdEx = postIndex
|
||||||
|
case 2:
|
||||||
|
if wireType != 2 {
|
||||||
|
return fmt.Errorf("proto: wrong wireType = %d for field Parent", wireType)
|
||||||
|
}
|
||||||
|
var stringLen uint64
|
||||||
|
for shift := uint(0); ; shift += 7 {
|
||||||
|
if shift >= 64 {
|
||||||
|
return ErrIntOverflowSnapshot
|
||||||
|
}
|
||||||
|
if iNdEx >= l {
|
||||||
|
return io.ErrUnexpectedEOF
|
||||||
|
}
|
||||||
|
b := dAtA[iNdEx]
|
||||||
|
iNdEx++
|
||||||
|
stringLen |= (uint64(b) & 0x7F) << shift
|
||||||
|
if b < 0x80 {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
intStringLen := int(stringLen)
|
||||||
|
if intStringLen < 0 {
|
||||||
|
return ErrInvalidLengthSnapshot
|
||||||
|
}
|
||||||
|
postIndex := iNdEx + intStringLen
|
||||||
|
if postIndex > l {
|
||||||
|
return io.ErrUnexpectedEOF
|
||||||
|
}
|
||||||
|
m.Parent = string(dAtA[iNdEx:postIndex])
|
||||||
|
iNdEx = postIndex
|
||||||
|
default:
|
||||||
|
iNdEx = preIndex
|
||||||
|
skippy, err := skipSnapshot(dAtA[iNdEx:])
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
if skippy < 0 {
|
||||||
|
return ErrInvalidLengthSnapshot
|
||||||
|
}
|
||||||
|
if (iNdEx + skippy) > l {
|
||||||
|
return io.ErrUnexpectedEOF
|
||||||
|
}
|
||||||
|
iNdEx += skippy
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if iNdEx > l {
|
||||||
|
return io.ErrUnexpectedEOF
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
func (m *SnapshotCommit) Unmarshal(dAtA []byte) error {
|
||||||
|
l := len(dAtA)
|
||||||
|
iNdEx := 0
|
||||||
|
for iNdEx < l {
|
||||||
|
preIndex := iNdEx
|
||||||
|
var wire uint64
|
||||||
|
for shift := uint(0); ; shift += 7 {
|
||||||
|
if shift >= 64 {
|
||||||
|
return ErrIntOverflowSnapshot
|
||||||
|
}
|
||||||
|
if iNdEx >= l {
|
||||||
|
return io.ErrUnexpectedEOF
|
||||||
|
}
|
||||||
|
b := dAtA[iNdEx]
|
||||||
|
iNdEx++
|
||||||
|
wire |= (uint64(b) & 0x7F) << shift
|
||||||
|
if b < 0x80 {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
fieldNum := int32(wire >> 3)
|
||||||
|
wireType := int(wire & 0x7)
|
||||||
|
if wireType == 4 {
|
||||||
|
return fmt.Errorf("proto: SnapshotCommit: wiretype end group for non-group")
|
||||||
|
}
|
||||||
|
if fieldNum <= 0 {
|
||||||
|
return fmt.Errorf("proto: SnapshotCommit: illegal tag %d (wire type %d)", fieldNum, wire)
|
||||||
|
}
|
||||||
|
switch fieldNum {
|
||||||
|
case 1:
|
||||||
|
if wireType != 2 {
|
||||||
|
return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType)
|
||||||
|
}
|
||||||
|
var stringLen uint64
|
||||||
|
for shift := uint(0); ; shift += 7 {
|
||||||
|
if shift >= 64 {
|
||||||
|
return ErrIntOverflowSnapshot
|
||||||
|
}
|
||||||
|
if iNdEx >= l {
|
||||||
|
return io.ErrUnexpectedEOF
|
||||||
|
}
|
||||||
|
b := dAtA[iNdEx]
|
||||||
|
iNdEx++
|
||||||
|
stringLen |= (uint64(b) & 0x7F) << shift
|
||||||
|
if b < 0x80 {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
intStringLen := int(stringLen)
|
||||||
|
if intStringLen < 0 {
|
||||||
|
return ErrInvalidLengthSnapshot
|
||||||
|
}
|
||||||
|
postIndex := iNdEx + intStringLen
|
||||||
|
if postIndex > l {
|
||||||
|
return io.ErrUnexpectedEOF
|
||||||
|
}
|
||||||
|
m.Key = string(dAtA[iNdEx:postIndex])
|
||||||
|
iNdEx = postIndex
|
||||||
|
case 2:
|
||||||
|
if wireType != 2 {
|
||||||
|
return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType)
|
||||||
|
}
|
||||||
|
var stringLen uint64
|
||||||
|
for shift := uint(0); ; shift += 7 {
|
||||||
|
if shift >= 64 {
|
||||||
|
return ErrIntOverflowSnapshot
|
||||||
|
}
|
||||||
|
if iNdEx >= l {
|
||||||
|
return io.ErrUnexpectedEOF
|
||||||
|
}
|
||||||
|
b := dAtA[iNdEx]
|
||||||
|
iNdEx++
|
||||||
|
stringLen |= (uint64(b) & 0x7F) << shift
|
||||||
|
if b < 0x80 {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
intStringLen := int(stringLen)
|
||||||
|
if intStringLen < 0 {
|
||||||
|
return ErrInvalidLengthSnapshot
|
||||||
|
}
|
||||||
|
postIndex := iNdEx + intStringLen
|
||||||
|
if postIndex > l {
|
||||||
|
return io.ErrUnexpectedEOF
|
||||||
|
}
|
||||||
|
m.Name = string(dAtA[iNdEx:postIndex])
|
||||||
|
iNdEx = postIndex
|
||||||
|
default:
|
||||||
|
iNdEx = preIndex
|
||||||
|
skippy, err := skipSnapshot(dAtA[iNdEx:])
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
if skippy < 0 {
|
||||||
|
return ErrInvalidLengthSnapshot
|
||||||
|
}
|
||||||
|
if (iNdEx + skippy) > l {
|
||||||
|
return io.ErrUnexpectedEOF
|
||||||
|
}
|
||||||
|
iNdEx += skippy
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if iNdEx > l {
|
||||||
|
return io.ErrUnexpectedEOF
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
func (m *SnapshotRemove) Unmarshal(dAtA []byte) error {
|
||||||
|
l := len(dAtA)
|
||||||
|
iNdEx := 0
|
||||||
|
for iNdEx < l {
|
||||||
|
preIndex := iNdEx
|
||||||
|
var wire uint64
|
||||||
|
for shift := uint(0); ; shift += 7 {
|
||||||
|
if shift >= 64 {
|
||||||
|
return ErrIntOverflowSnapshot
|
||||||
|
}
|
||||||
|
if iNdEx >= l {
|
||||||
|
return io.ErrUnexpectedEOF
|
||||||
|
}
|
||||||
|
b := dAtA[iNdEx]
|
||||||
|
iNdEx++
|
||||||
|
wire |= (uint64(b) & 0x7F) << shift
|
||||||
|
if b < 0x80 {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
fieldNum := int32(wire >> 3)
|
||||||
|
wireType := int(wire & 0x7)
|
||||||
|
if wireType == 4 {
|
||||||
|
return fmt.Errorf("proto: SnapshotRemove: wiretype end group for non-group")
|
||||||
|
}
|
||||||
|
if fieldNum <= 0 {
|
||||||
|
return fmt.Errorf("proto: SnapshotRemove: illegal tag %d (wire type %d)", fieldNum, wire)
|
||||||
|
}
|
||||||
|
switch fieldNum {
|
||||||
|
case 1:
|
||||||
|
if wireType != 2 {
|
||||||
|
return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType)
|
||||||
|
}
|
||||||
|
var stringLen uint64
|
||||||
|
for shift := uint(0); ; shift += 7 {
|
||||||
|
if shift >= 64 {
|
||||||
|
return ErrIntOverflowSnapshot
|
||||||
|
}
|
||||||
|
if iNdEx >= l {
|
||||||
|
return io.ErrUnexpectedEOF
|
||||||
|
}
|
||||||
|
b := dAtA[iNdEx]
|
||||||
|
iNdEx++
|
||||||
|
stringLen |= (uint64(b) & 0x7F) << shift
|
||||||
|
if b < 0x80 {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
intStringLen := int(stringLen)
|
||||||
|
if intStringLen < 0 {
|
||||||
|
return ErrInvalidLengthSnapshot
|
||||||
|
}
|
||||||
|
postIndex := iNdEx + intStringLen
|
||||||
|
if postIndex > l {
|
||||||
|
return io.ErrUnexpectedEOF
|
||||||
|
}
|
||||||
|
m.Key = string(dAtA[iNdEx:postIndex])
|
||||||
|
iNdEx = postIndex
|
||||||
|
default:
|
||||||
|
iNdEx = preIndex
|
||||||
|
skippy, err := skipSnapshot(dAtA[iNdEx:])
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
if skippy < 0 {
|
||||||
|
return ErrInvalidLengthSnapshot
|
||||||
|
}
|
||||||
|
if (iNdEx + skippy) > l {
|
||||||
|
return io.ErrUnexpectedEOF
|
||||||
|
}
|
||||||
|
iNdEx += skippy
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if iNdEx > l {
|
||||||
|
return io.ErrUnexpectedEOF
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
func skipSnapshot(dAtA []byte) (n int, err error) {
|
||||||
|
l := len(dAtA)
|
||||||
|
iNdEx := 0
|
||||||
|
for iNdEx < l {
|
||||||
|
var wire uint64
|
||||||
|
for shift := uint(0); ; shift += 7 {
|
||||||
|
if shift >= 64 {
|
||||||
|
return 0, ErrIntOverflowSnapshot
|
||||||
|
}
|
||||||
|
if iNdEx >= l {
|
||||||
|
return 0, io.ErrUnexpectedEOF
|
||||||
|
}
|
||||||
|
b := dAtA[iNdEx]
|
||||||
|
iNdEx++
|
||||||
|
wire |= (uint64(b) & 0x7F) << shift
|
||||||
|
if b < 0x80 {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
wireType := int(wire & 0x7)
|
||||||
|
switch wireType {
|
||||||
|
case 0:
|
||||||
|
for shift := uint(0); ; shift += 7 {
|
||||||
|
if shift >= 64 {
|
||||||
|
return 0, ErrIntOverflowSnapshot
|
||||||
|
}
|
||||||
|
if iNdEx >= l {
|
||||||
|
return 0, io.ErrUnexpectedEOF
|
||||||
|
}
|
||||||
|
iNdEx++
|
||||||
|
if dAtA[iNdEx-1] < 0x80 {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return iNdEx, nil
|
||||||
|
case 1:
|
||||||
|
iNdEx += 8
|
||||||
|
return iNdEx, nil
|
||||||
|
case 2:
|
||||||
|
var length int
|
||||||
|
for shift := uint(0); ; shift += 7 {
|
||||||
|
if shift >= 64 {
|
||||||
|
return 0, ErrIntOverflowSnapshot
|
||||||
|
}
|
||||||
|
if iNdEx >= l {
|
||||||
|
return 0, io.ErrUnexpectedEOF
|
||||||
|
}
|
||||||
|
b := dAtA[iNdEx]
|
||||||
|
iNdEx++
|
||||||
|
length |= (int(b) & 0x7F) << shift
|
||||||
|
if b < 0x80 {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
iNdEx += length
|
||||||
|
if length < 0 {
|
||||||
|
return 0, ErrInvalidLengthSnapshot
|
||||||
|
}
|
||||||
|
return iNdEx, nil
|
||||||
|
case 3:
|
||||||
|
for {
|
||||||
|
var innerWire uint64
|
||||||
|
var start int = iNdEx
|
||||||
|
for shift := uint(0); ; shift += 7 {
|
||||||
|
if shift >= 64 {
|
||||||
|
return 0, ErrIntOverflowSnapshot
|
||||||
|
}
|
||||||
|
if iNdEx >= l {
|
||||||
|
return 0, io.ErrUnexpectedEOF
|
||||||
|
}
|
||||||
|
b := dAtA[iNdEx]
|
||||||
|
iNdEx++
|
||||||
|
innerWire |= (uint64(b) & 0x7F) << shift
|
||||||
|
if b < 0x80 {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
innerWireType := int(innerWire & 0x7)
|
||||||
|
if innerWireType == 4 {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
next, err := skipSnapshot(dAtA[start:])
|
||||||
|
if err != nil {
|
||||||
|
return 0, err
|
||||||
|
}
|
||||||
|
iNdEx = start + next
|
||||||
|
}
|
||||||
|
return iNdEx, nil
|
||||||
|
case 4:
|
||||||
|
return iNdEx, nil
|
||||||
|
case 5:
|
||||||
|
iNdEx += 4
|
||||||
|
return iNdEx, nil
|
||||||
|
default:
|
||||||
|
return 0, fmt.Errorf("proto: illegal wireType %d", wireType)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
panic("unreachable")
|
||||||
|
}
|
||||||
|
|
||||||
|
var (
|
||||||
|
ErrInvalidLengthSnapshot = fmt.Errorf("proto: negative length found during unmarshaling")
|
||||||
|
ErrIntOverflowSnapshot = fmt.Errorf("proto: integer overflow")
|
||||||
|
)
|
||||||
|
|
||||||
|
func init() {
|
||||||
|
proto.RegisterFile("github.com/containerd/containerd/api/services/events/v1/snapshot.proto", fileDescriptorSnapshot)
|
||||||
|
}
|
||||||
|
|
||||||
|
var fileDescriptorSnapshot = []byte{
|
||||||
|
// 219 bytes of a gzipped FileDescriptorProto
|
||||||
|
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x72, 0x4b, 0xcf, 0x2c, 0xc9,
|
||||||
|
0x28, 0x4d, 0xd2, 0x4b, 0xce, 0xcf, 0xd5, 0x4f, 0xce, 0xcf, 0x2b, 0x49, 0xcc, 0xcc, 0x4b, 0x2d,
|
||||||
|
0x4a, 0x41, 0x66, 0x26, 0x16, 0x64, 0xea, 0x17, 0xa7, 0x16, 0x95, 0x65, 0x26, 0xa7, 0x16, 0xeb,
|
||||||
|
0xa7, 0x96, 0xa5, 0xe6, 0x95, 0x14, 0xeb, 0x97, 0x19, 0xea, 0x17, 0xe7, 0x25, 0x16, 0x14, 0x67,
|
||||||
|
0xe4, 0x97, 0xe8, 0x15, 0x14, 0xe5, 0x97, 0xe4, 0x0b, 0xc9, 0x22, 0x74, 0xe8, 0xc1, 0x54, 0xeb,
|
||||||
|
0x41, 0x54, 0xeb, 0x95, 0x19, 0x2a, 0x59, 0x73, 0xf1, 0x07, 0x43, 0x35, 0x04, 0x14, 0xa5, 0x16,
|
||||||
|
0x24, 0x16, 0xa5, 0x0a, 0x09, 0x70, 0x31, 0x67, 0xa7, 0x56, 0x4a, 0x30, 0x2a, 0x30, 0x6a, 0x70,
|
||||||
|
0x06, 0x81, 0x98, 0x42, 0x62, 0x5c, 0x6c, 0x20, 0x99, 0xbc, 0x12, 0x09, 0x26, 0xb0, 0x20, 0x94,
|
||||||
|
0xa7, 0x64, 0xc6, 0xc5, 0x07, 0xd3, 0xec, 0x9c, 0x9f, 0x9b, 0x9b, 0x59, 0x82, 0x45, 0xaf, 0x10,
|
||||||
|
0x17, 0x4b, 0x5e, 0x62, 0x6e, 0x2a, 0x54, 0x27, 0x98, 0xad, 0xa4, 0x84, 0xd0, 0x17, 0x94, 0x9a,
|
||||||
|
0x9b, 0x5f, 0x86, 0xc5, 0x4e, 0xa7, 0x88, 0x13, 0x0f, 0xe5, 0x18, 0x6e, 0x3c, 0x94, 0x63, 0x68,
|
||||||
|
0x78, 0x24, 0xc7, 0x78, 0xe2, 0x91, 0x1c, 0xe3, 0x85, 0x47, 0x72, 0x8c, 0x0f, 0x1e, 0xc9, 0x31,
|
||||||
|
0x46, 0xd9, 0x91, 0x19, 0x32, 0xd6, 0x10, 0x56, 0x12, 0x1b, 0x38, 0x60, 0x8c, 0x01, 0x01, 0x00,
|
||||||
|
0x00, 0xff, 0xff, 0x10, 0x4c, 0x3d, 0xb2, 0x62, 0x01, 0x00, 0x00,
|
||||||
|
}
|
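The Marshal path in the generated snapshot events code above works in two passes: Size walks each message and adds, for every non-empty string field, one tag byte plus a varint-encoded length plus the payload, and Marshal then allocates a buffer of exactly that size for MarshalTo to fill. The roundtrip sketch below exercises SnapshotPrepare under that scheme; the import path follows the go_package option in the accompanying snapshot.proto, the key values are invented, and the snippet is illustrative rather than part of the vendored code.

package main

import (
    "fmt"

    events "github.com/containerd/containerd/api/services/events/v1"
)

func main() {
    prepare := &events.SnapshotPrepare{
        Key:    "sha256:deadbeef-active",
        Parent: "sha256:cafef00d",
    }

    // Size() computes the exact wire length; Marshal() allocates that many
    // bytes and delegates to MarshalTo, as in the generated code above.
    fmt.Println("encoded size:", prepare.Size())

    data, err := prepare.Marshal()
    if err != nil {
        panic(err)
    }

    var decoded events.SnapshotPrepare
    if err := decoded.Unmarshal(data); err != nil {
        panic(err)
    }
    fmt.Printf("key=%s parent=%s\n", decoded.Key, decoded.Parent)
}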
vendor/github.com/containerd/containerd/api/services/events/v1/snapshot.proto (new file, 19 lines, generated, vendored)
@ -0,0 +1,19 @@
syntax = "proto3";

package containerd.services.events.v1;

option go_package = "github.com/containerd/containerd/api/services/events/v1;events";

message SnapshotPrepare {
    string key = 1;
    string parent = 2;
}

message SnapshotCommit {
    string key = 1;
    string name = 2;
}

message SnapshotRemove {
    string key = 1;
}
vendor/github.com/containerd/containerd/api/services/events/v1/task.pb.go (new file, 2257 lines, generated, vendored; diff suppressed because it is too large)
vendor/github.com/containerd/containerd/api/services/events/v1/task.proto (new file, 68 lines, generated, vendored)
@ -0,0 +1,68 @@
syntax = "proto3";

package containerd.services.events.v1;

import "gogoproto/gogo.proto";
import "google/protobuf/timestamp.proto";
import "github.com/containerd/containerd/api/types/mount.proto";

option go_package = "github.com/containerd/containerd/api/services/events/v1;events";

message TaskCreate {
    string container_id = 1;
    string bundle = 2;
    repeated containerd.types.Mount rootfs = 3;
    TaskIO io = 4 [(gogoproto.customname) = "IO"];
    string checkpoint = 5;
    uint32 pid = 6;
}

message TaskStart {
    string container_id = 1;
    uint32 pid = 2;
}

message TaskDelete {
    string container_id = 1;
    uint32 pid = 2;
    uint32 exit_status = 3;
    google.protobuf.Timestamp exited_at = 4 [(gogoproto.stdtime) = true, (gogoproto.nullable) = false];
}

message TaskIO {
    string stdin = 1;
    string stdout = 2;
    string stderr = 3;
    bool terminal = 4;
}

message TaskExit {
    string container_id = 1;
    string id = 2;
    uint32 pid = 3;
    uint32 exit_status = 4;
    google.protobuf.Timestamp exited_at = 5 [(gogoproto.stdtime) = true, (gogoproto.nullable) = false];
}

message TaskOOM {
    string container_id = 1;
}

message TaskExecAdded {
    string container_id = 1;
    string exec_id = 2;
    uint32 pid = 3;
}

message TaskPaused {
    string container_id = 1;
}

message TaskResumed {
    string container_id = 1;
}

message TaskCheckpointed {
    string container_id = 1;
    string checkpoint = 2;
}
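Because exited_at is declared with (gogoproto.stdtime) = true and (gogoproto.nullable) = false, the generated Go struct carries the timestamp as a plain time.Time rather than a pointer to a Timestamp message. The sketch below fills in a TaskExit event under that assumption; the generated field names (ContainerID, ExitedAt, and so on) depend on the gogo generator options and are assumed here, the values are invented, and the snippet is not part of the vendored code.

package main

import (
    "fmt"
    "time"

    events "github.com/containerd/containerd/api/services/events/v1"
)

func main() {
    // TaskExit mirrors the message in task.proto: container id, exec id,
    // pid, exit status, and a non-nullable stdtime timestamp.
    exit := &events.TaskExit{
        ContainerID: "redis-1",
        ID:          "redis-1",
        Pid:         4242,
        ExitStatus:  0,
        ExitedAt:    time.Now().UTC(),
    }

    data, err := exit.Marshal()
    if err != nil {
        panic(err)
    }
    fmt.Println("TaskExit encoded to", len(data), "bytes")
}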
vendor/github.com/containerd/containerd/api/services/execution/execution.proto (deleted, 176 lines, generated, vendored)
@ -1,176 +0,0 @@
|
|||||||
syntax = "proto3";
|
|
||||||
|
|
||||||
package containerd.v1.services.execution;
|
|
||||||
|
|
||||||
import "google/protobuf/empty.proto";
|
|
||||||
import "google/protobuf/any.proto";
|
|
||||||
import "gogoproto/gogo.proto";
|
|
||||||
import "github.com/containerd/containerd/api/types/mount/mount.proto";
|
|
||||||
import "github.com/containerd/containerd/api/types/descriptor/descriptor.proto";
|
|
||||||
import "github.com/containerd/containerd/api/types/task/task.proto";
|
|
||||||
import "google/protobuf/timestamp.proto";
|
|
||||||
|
|
||||||
service Tasks {
|
|
||||||
rpc Create(CreateRequest) returns (CreateResponse);
|
|
||||||
rpc Start(StartRequest) returns (google.protobuf.Empty);
|
|
||||||
rpc Delete(DeleteRequest) returns (DeleteResponse);
|
|
||||||
rpc DeleteProcess(DeleteProcessRequest) returns (DeleteResponse);
|
|
||||||
rpc Info(InfoRequest) returns (InfoResponse);
|
|
||||||
rpc List(ListRequest) returns (ListResponse);
|
|
||||||
rpc Kill(KillRequest) returns (google.protobuf.Empty);
|
|
||||||
rpc Events(EventsRequest) returns (stream containerd.v1.types.Event);
|
|
||||||
rpc Exec(ExecRequest) returns (ExecResponse);
|
|
||||||
rpc Pty(PtyRequest) returns (google.protobuf.Empty);
|
|
||||||
rpc CloseStdin(CloseStdinRequest) returns (google.protobuf.Empty);
|
|
||||||
rpc Pause(PauseRequest) returns (google.protobuf.Empty);
|
|
||||||
rpc Resume(ResumeRequest) returns (google.protobuf.Empty);
|
|
||||||
rpc Processes(ProcessesRequest) returns (ProcessesResponse);
|
|
||||||
rpc Checkpoint(CheckpointRequest) returns (CheckpointResponse);
|
|
||||||
}
|
|
||||||
|
|
||||||
message CreateRequest {
|
|
||||||
// ContainerID specifies the container to use for creating this task.
|
|
||||||
//
|
|
||||||
// The spec from the provided container id will be used to create the
|
|
||||||
// task associated with this container. Only one task can be run at a time
|
|
||||||
// per container.
|
|
||||||
//
|
|
||||||
// This should be created using the Containers service.
|
|
||||||
string container_id = 2;
|
|
||||||
|
|
||||||
// RootFS provides the pre-chroot mounts to perform in the shim before
|
|
||||||
// executing the container task.
|
|
||||||
//
|
|
||||||
// These are for mounts that cannot be performed in the user namespace.
|
|
||||||
// Typically, these mounts should be resolved from snapshots specified on
|
|
||||||
// the container object.
|
|
||||||
repeated containerd.v1.types.Mount rootfs = 3;
|
|
||||||
|
|
||||||
string stdin = 5;
|
|
||||||
string stdout = 6;
|
|
||||||
string stderr = 7;
|
|
||||||
bool terminal = 8;
|
|
||||||
|
|
||||||
types.Descriptor checkpoint = 9;
|
|
||||||
}
|
|
||||||
|
|
||||||
message CreateResponse {
|
|
||||||
// TODO(stevvooe): We no longer have an id for a task since they are bound
|
|
||||||
// to a single container. Although, we should represent each new task with
|
|
||||||
// an ID so one can differentiate between each instance of a container
|
|
||||||
// running.
|
|
||||||
//
|
|
||||||
// Hence, we are leaving this here and reserving the field number in case
|
|
||||||
// we need to move in this direction.
|
|
||||||
// string id = 1;
|
|
||||||
|
|
||||||
string container_id = 2;
|
|
||||||
uint32 pid = 3;
|
|
||||||
}
|
|
||||||
|
|
||||||
message StartRequest {
|
|
||||||
string container_id = 1;
|
|
||||||
}
|
|
||||||
|
|
||||||
message DeleteRequest {
|
|
||||||
string container_id = 1;
|
|
||||||
}
|
|
||||||
|
|
||||||
message DeleteResponse {
|
|
||||||
string container_id = 1;
|
|
||||||
uint32 exit_status = 2;
|
|
||||||
google.protobuf.Timestamp exited_at = 3 [(gogoproto.stdtime) = true, (gogoproto.nullable) = false];
|
|
||||||
}
|
|
||||||
|
|
||||||
message DeleteProcessRequest {
|
|
||||||
string container_id = 1;
|
|
||||||
uint32 pid = 2;
|
|
||||||
}
|
|
||||||
|
|
||||||
message InfoRequest {
|
|
||||||
string container_id = 1;
|
|
||||||
}
|
|
||||||
|
|
||||||
message InfoResponse {
|
|
||||||
types.Task task = 1;
|
|
||||||
}
|
|
||||||
|
|
||||||
message ListRequest {
|
|
||||||
}
|
|
||||||
|
|
||||||
message ListResponse {
|
|
||||||
repeated containerd.v1.types.Task tasks = 1;
|
|
||||||
}
|
|
||||||
|
|
||||||
message KillRequest {
|
|
||||||
string container_id = 1;
|
|
||||||
uint32 signal = 2;
|
|
||||||
oneof pid_or_all {
|
|
||||||
bool all = 3;
|
|
||||||
uint32 pid = 4;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
message EventsRequest {
|
|
||||||
}
|
|
||||||
|
|
||||||
message ExecRequest {
|
|
||||||
// ContainerID specifies the container in which to exec the process.
|
|
||||||
string container_id = 1;
|
|
||||||
bool terminal = 2;
|
|
||||||
string stdin = 3;
|
|
||||||
string stdout = 4;
|
|
||||||
string stderr = 5;
|
|
||||||
|
|
||||||
// Spec for starting a process in the target container.
|
|
||||||
//
|
|
||||||
// For runc, this is a process spec, for example.
|
|
||||||
google.protobuf.Any spec = 6;
|
|
||||||
}
|
|
||||||
|
|
||||||
message ExecResponse {
|
|
||||||
uint32 pid = 1;
|
|
||||||
}
|
|
||||||
|
|
||||||
message PtyRequest {
|
|
||||||
string container_id = 1;
|
|
||||||
uint32 pid = 2;
|
|
||||||
uint32 width = 3;
|
|
||||||
uint32 height = 4;
|
|
||||||
}
|
|
||||||
|
|
||||||
message CloseStdinRequest {
|
|
||||||
string container_id = 1;
|
|
||||||
uint32 pid = 2;
|
|
||||||
}
|
|
||||||
|
|
||||||
message PauseRequest {
|
|
||||||
string container_id = 1;
|
|
||||||
}
|
|
||||||
|
|
||||||
message ResumeRequest {
|
|
||||||
string container_id = 1;
|
|
||||||
}
|
|
||||||
|
|
||||||
message ProcessesRequest {
|
|
||||||
string container_id = 1;
|
|
||||||
}
|
|
||||||
|
|
||||||
message ProcessesResponse{
|
|
||||||
repeated containerd.v1.types.Process processes = 1;
|
|
||||||
}
|
|
||||||
|
|
||||||
message CheckpointRequest {
|
|
||||||
string container_id = 1;
|
|
||||||
bool allow_tcp = 2;
|
|
||||||
bool allow_unix_sockets = 3;
|
|
||||||
bool allow_terminal = 4;
|
|
||||||
bool file_locks = 5;
|
|
||||||
repeated string empty_namespaces = 6;
|
|
||||||
string parent_checkpoint = 7 [(gogoproto.customtype) = "github.com/opencontainers/go-digest.Digest", (gogoproto.nullable) = false];
|
|
||||||
bool exit = 8;
|
|
||||||
}
|
|
||||||
|
|
||||||
message CheckpointResponse {
|
|
||||||
repeated types.Descriptor descriptors = 1;
|
|
||||||
}
|
|
vendor/github.com/containerd/containerd/api/services/images/images.pb.go (1406 lines, generated, vendored; diff suppressed because it is too large)
vendor/github.com/containerd/containerd/api/services/images/images.proto (deleted, 78 lines, generated, vendored)
@ -1,78 +0,0 @@
|
|||||||
syntax = "proto3";
|
|
||||||
|
|
||||||
package containerd.v1;
|
|
||||||
|
|
||||||
import "gogoproto/gogo.proto";
|
|
||||||
import "google/protobuf/empty.proto";
|
|
||||||
import "github.com/containerd/containerd/api/types/mount/mount.proto";
|
|
||||||
import "github.com/containerd/containerd/api/types/descriptor/descriptor.proto";
|
|
||||||
|
|
||||||
// Images is a service that allows one to register images with containerd.
|
|
||||||
//
|
|
||||||
// In containerd, an image is merely the mapping of a name to a content root,
|
|
||||||
// described by a descriptor. The behavior and state of image is purely
|
|
||||||
// dictated by the type of the descriptor.
|
|
||||||
//
|
|
||||||
// From the perspective of this service, these references are mostly shallow,
|
|
||||||
// in that the existence of the required content won't be validated until
|
|
||||||
// required by consuming services.
|
|
||||||
//
|
|
||||||
// As such, this can really be considered a "metadata service".
|
|
||||||
service Images {
|
|
||||||
// Get returns an image by name.
|
|
||||||
rpc Get(GetRequest) returns (GetResponse);
|
|
||||||
|
|
||||||
// List returns a list of all images known to containerd.
|
|
||||||
rpc List(ListRequest) returns (ListResponse);
|
|
||||||
|
|
||||||
// Put assigns the name to a given target image based on the provided
|
|
||||||
// image.
|
|
||||||
rpc Put(PutRequest) returns (google.protobuf.Empty);
|
|
||||||
|
|
||||||
// Delete deletes the image by name.
|
|
||||||
rpc Delete(DeleteRequest) returns (google.protobuf.Empty);
|
|
||||||
}
|
|
||||||
|
|
||||||
message Image {
|
|
||||||
string name = 1;
|
|
||||||
string labels = 2;
|
|
||||||
types.Descriptor target = 3 [(gogoproto.nullable) = false];
|
|
||||||
}
|
|
||||||
|
|
||||||
message GetRequest {
|
|
||||||
string name = 1;
|
|
||||||
|
|
||||||
// TODO(stevvooe): Consider that we may want to have multiple images under
|
|
||||||
// the same name or multiple names for the same image. This mapping could
|
|
||||||
// be truly many to many but we'll need a way to identify an entry.
|
|
||||||
//
|
|
||||||
// For now, we consider it unique but an intermediary index could be
|
|
||||||
// created to allow for a dispatch of images.
|
|
||||||
}
|
|
||||||
|
|
||||||
message GetResponse {
|
|
||||||
Image image = 1;
|
|
||||||
}
|
|
||||||
|
|
||||||
message PutRequest {
|
|
||||||
Image image = 1 [(gogoproto.nullable) = false];
|
|
||||||
}
|
|
||||||
|
|
||||||
message ListRequest {
|
|
||||||
// TODO(stevvooe): empty for now, need to ad filtration
|
|
||||||
// Some common use cases we might consider:
|
|
||||||
//
|
|
||||||
// 1. Select by multiple names.
|
|
||||||
// 2. Select by platform.
|
|
||||||
// 3. Select by annotations.
|
|
||||||
}
|
|
||||||
|
|
||||||
message ListResponse {
|
|
||||||
repeated Image images = 1 [(gogoproto.nullable) = false];
|
|
||||||
|
|
||||||
// TODO(stevvooe): Add pagination.
|
|
||||||
}
|
|
||||||
|
|
||||||
message DeleteRequest {
|
|
||||||
string name = 1;
|
|
||||||
}
|
|
vendor/github.com/containerd/containerd/api/services/images/v1/images.pb.go (new file, 2190 lines, generated, vendored; diff suppressed because it is too large)
vendor/github.com/containerd/containerd/api/services/images/v1/images.proto (new file, 117 lines, generated, vendored)
@ -0,0 +1,117 @@
syntax = "proto3";

package containerd.services.images.v1;

import "gogoproto/gogo.proto";
import "google/protobuf/empty.proto";
import "google/protobuf/field_mask.proto";
import "google/protobuf/timestamp.proto";
import "github.com/containerd/containerd/api/types/descriptor.proto";

option go_package = "github.com/containerd/containerd/api/services/images/v1;images";

// Images is a service that allows one to register images with containerd.
//
// In containerd, an image is merely the mapping of a name to a content root,
// described by a descriptor. The behavior and state of image is purely
// dictated by the type of the descriptor.
//
// From the perspective of this service, these references are mostly shallow,
// in that the existence of the required content won't be validated until
// required by consuming services.
//
// As such, this can really be considered a "metadata service".
service Images {
    // Get returns an image by name.
    rpc Get(GetImageRequest) returns (GetImageResponse);

    // List returns a list of all images known to containerd.
    rpc List(ListImagesRequest) returns (ListImagesResponse);

    // Create an image record in the metadata store.
    //
    // The name of the image must be unique.
    rpc Create(CreateImageRequest) returns (CreateImageResponse);

    // Update assigns the name to a given target image based on the provided
    // image.
    rpc Update(UpdateImageRequest) returns (UpdateImageResponse);

    // Delete deletes the image by name.
    rpc Delete(DeleteImageRequest) returns (google.protobuf.Empty);
}

message Image {
    // Name provides a unique name for the image.
    //
    // Containerd treats this as the primary identifier.
    string name = 1;

    // Labels provides free form labels for the image. These are runtime only
    // and do not get inherited into the package image in any way.
    //
    // Labels may be updated using the field mask.
    map<string, string> labels = 2;

    // Target describes the content entry point of the image.
    containerd.types.Descriptor target = 3 [(gogoproto.nullable) = false];

    // CreatedAt is the time the image was first created.
    google.protobuf.Timestamp created_at = 7 [(gogoproto.stdtime) = true, (gogoproto.nullable) = false];

    // UpdatedAt is the last time the image was mutated.
    google.protobuf.Timestamp updated_at = 8 [(gogoproto.stdtime) = true, (gogoproto.nullable) = false];
}

message GetImageRequest {
    string name = 1;
}

message GetImageResponse {
    Image image = 1;
}

message CreateImageRequest {
    Image image = 1 [(gogoproto.nullable) = false];
}

message CreateImageResponse {
    Image image = 1 [(gogoproto.nullable) = false];
}

message UpdateImageRequest {
    // Image provides a full or partial image for update.
    //
    // The name field must be set or an error will be returned.
    Image image = 1 [(gogoproto.nullable) = false];

    // UpdateMask specifies which fields to perform the update on. If empty,
    // the operation applies to all fields.
    google.protobuf.FieldMask update_mask = 2;
}

message UpdateImageResponse {
    Image image = 1 [(gogoproto.nullable) = false];
}

message ListImagesRequest {
    // Filters contains one or more filters using the syntax defined in the
    // containerd filter package.
    //
    // The returned result will be those that match any of the provided
    // filters. Expanded, images that match the following will be
    // returned:
    //
    //   filters[0] or filters[1] or ... or filters[n-1] or filters[n]
    //
    // If filters is zero-length or nil, all items will be returned.
    repeated string filters = 1;
}

message ListImagesResponse {
    repeated Image images = 1 [(gogoproto.nullable) = false];
}

message DeleteImageRequest {
    string name = 1;
}
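UpdateImageRequest pairs a possibly partial Image with a google.protobuf.FieldMask, so a caller can restrict the update to specific fields such as labels. Below is a sketch of building such a request against the generated images/v1 package; the gRPC client plumbing is omitted, the image name and label are invented, and the exact generated field names are assumptions rather than something taken from this commit.

package main

import (
    "fmt"

    imagesapi "github.com/containerd/containerd/api/services/images/v1"
    "github.com/gogo/protobuf/types"
)

func main() {
    // Update only the labels of an existing image record: the name selects
    // the image, and the field mask limits the update to the labels field.
    req := &imagesapi.UpdateImageRequest{
        Image: imagesapi.Image{
            Name:   "docker.io/library/redis:latest",
            Labels: map[string]string{"example.pinned": "true"},
        },
        UpdateMask: &types.FieldMask{
            Paths: []string{"labels"},
        },
    }
    fmt.Println("updating", req.Image.Name, "fields:", req.UpdateMask.Paths)
}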
@ -1,12 +1,12 @@
|
|||||||
// Code generated by protoc-gen-gogo.
|
// Code generated by protoc-gen-gogo.
|
||||||
// source: github.com/containerd/containerd/api/services/namespaces/namespace.proto
|
// source: github.com/containerd/containerd/api/services/namespaces/v1/namespace.proto
|
||||||
// DO NOT EDIT!
|
// DO NOT EDIT!
|
||||||
|
|
||||||
/*
|
/*
|
||||||
Package namespaces is a generated protocol buffer package.
|
Package namespaces is a generated protocol buffer package.
|
||||||
|
|
||||||
It is generated from these files:
|
It is generated from these files:
|
||||||
github.com/containerd/containerd/api/services/namespaces/namespace.proto
|
github.com/containerd/containerd/api/services/namespaces/v1/namespace.proto
|
||||||
|
|
||||||
It has these top-level messages:
|
It has these top-level messages:
|
||||||
Namespace
|
Namespace
|
||||||
@ -152,16 +152,16 @@ func (*DeleteNamespaceRequest) ProtoMessage() {}
|
|||||||
func (*DeleteNamespaceRequest) Descriptor() ([]byte, []int) { return fileDescriptorNamespace, []int{9} }
|
func (*DeleteNamespaceRequest) Descriptor() ([]byte, []int) { return fileDescriptorNamespace, []int{9} }
|
||||||
|
|
||||||
func init() {
|
func init() {
|
||||||
proto.RegisterType((*Namespace)(nil), "containerd.v1.namespaces.Namespace")
|
proto.RegisterType((*Namespace)(nil), "containerd.services.namespaces.v1.Namespace")
|
||||||
proto.RegisterType((*GetNamespaceRequest)(nil), "containerd.v1.namespaces.GetNamespaceRequest")
|
proto.RegisterType((*GetNamespaceRequest)(nil), "containerd.services.namespaces.v1.GetNamespaceRequest")
|
||||||
proto.RegisterType((*GetNamespaceResponse)(nil), "containerd.v1.namespaces.GetNamespaceResponse")
|
proto.RegisterType((*GetNamespaceResponse)(nil), "containerd.services.namespaces.v1.GetNamespaceResponse")
|
||||||
proto.RegisterType((*ListNamespacesRequest)(nil), "containerd.v1.namespaces.ListNamespacesRequest")
|
proto.RegisterType((*ListNamespacesRequest)(nil), "containerd.services.namespaces.v1.ListNamespacesRequest")
|
||||||
proto.RegisterType((*ListNamespacesResponse)(nil), "containerd.v1.namespaces.ListNamespacesResponse")
|
proto.RegisterType((*ListNamespacesResponse)(nil), "containerd.services.namespaces.v1.ListNamespacesResponse")
|
||||||
proto.RegisterType((*CreateNamespaceRequest)(nil), "containerd.v1.namespaces.CreateNamespaceRequest")
|
proto.RegisterType((*CreateNamespaceRequest)(nil), "containerd.services.namespaces.v1.CreateNamespaceRequest")
|
||||||
proto.RegisterType((*CreateNamespaceResponse)(nil), "containerd.v1.namespaces.CreateNamespaceResponse")
|
proto.RegisterType((*CreateNamespaceResponse)(nil), "containerd.services.namespaces.v1.CreateNamespaceResponse")
|
||||||
proto.RegisterType((*UpdateNamespaceRequest)(nil), "containerd.v1.namespaces.UpdateNamespaceRequest")
|
proto.RegisterType((*UpdateNamespaceRequest)(nil), "containerd.services.namespaces.v1.UpdateNamespaceRequest")
|
||||||
proto.RegisterType((*UpdateNamespaceResponse)(nil), "containerd.v1.namespaces.UpdateNamespaceResponse")
|
proto.RegisterType((*UpdateNamespaceResponse)(nil), "containerd.services.namespaces.v1.UpdateNamespaceResponse")
|
||||||
proto.RegisterType((*DeleteNamespaceRequest)(nil), "containerd.v1.namespaces.DeleteNamespaceRequest")
|
proto.RegisterType((*DeleteNamespaceRequest)(nil), "containerd.services.namespaces.v1.DeleteNamespaceRequest")
|
||||||
}
|
}
|
||||||
|
|
||||||
// Reference imports to suppress errors if they are not otherwise used.
|
// Reference imports to suppress errors if they are not otherwise used.
|
||||||
@ -192,7 +192,7 @@ func NewNamespacesClient(cc *grpc.ClientConn) NamespacesClient {
|
|||||||
|
|
||||||
func (c *namespacesClient) Get(ctx context.Context, in *GetNamespaceRequest, opts ...grpc.CallOption) (*GetNamespaceResponse, error) {
|
func (c *namespacesClient) Get(ctx context.Context, in *GetNamespaceRequest, opts ...grpc.CallOption) (*GetNamespaceResponse, error) {
|
||||||
out := new(GetNamespaceResponse)
|
out := new(GetNamespaceResponse)
|
||||||
err := grpc.Invoke(ctx, "/containerd.v1.namespaces.Namespaces/Get", in, out, c.cc, opts...)
|
err := grpc.Invoke(ctx, "/containerd.services.namespaces.v1.Namespaces/Get", in, out, c.cc, opts...)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
@ -201,7 +201,7 @@ func (c *namespacesClient) Get(ctx context.Context, in *GetNamespaceRequest, opt
|
|||||||
|
|
||||||
func (c *namespacesClient) List(ctx context.Context, in *ListNamespacesRequest, opts ...grpc.CallOption) (*ListNamespacesResponse, error) {
|
func (c *namespacesClient) List(ctx context.Context, in *ListNamespacesRequest, opts ...grpc.CallOption) (*ListNamespacesResponse, error) {
|
||||||
out := new(ListNamespacesResponse)
|
out := new(ListNamespacesResponse)
|
||||||
err := grpc.Invoke(ctx, "/containerd.v1.namespaces.Namespaces/List", in, out, c.cc, opts...)
|
err := grpc.Invoke(ctx, "/containerd.services.namespaces.v1.Namespaces/List", in, out, c.cc, opts...)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
@ -210,7 +210,7 @@ func (c *namespacesClient) List(ctx context.Context, in *ListNamespacesRequest,
|
|||||||
|
|
||||||
func (c *namespacesClient) Create(ctx context.Context, in *CreateNamespaceRequest, opts ...grpc.CallOption) (*CreateNamespaceResponse, error) {
|
func (c *namespacesClient) Create(ctx context.Context, in *CreateNamespaceRequest, opts ...grpc.CallOption) (*CreateNamespaceResponse, error) {
|
||||||
out := new(CreateNamespaceResponse)
|
out := new(CreateNamespaceResponse)
|
||||||
err := grpc.Invoke(ctx, "/containerd.v1.namespaces.Namespaces/Create", in, out, c.cc, opts...)
|
err := grpc.Invoke(ctx, "/containerd.services.namespaces.v1.Namespaces/Create", in, out, c.cc, opts...)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
@ -219,7 +219,7 @@ func (c *namespacesClient) Create(ctx context.Context, in *CreateNamespaceReques
|
|||||||
|
|
||||||
func (c *namespacesClient) Update(ctx context.Context, in *UpdateNamespaceRequest, opts ...grpc.CallOption) (*UpdateNamespaceResponse, error) {
|
func (c *namespacesClient) Update(ctx context.Context, in *UpdateNamespaceRequest, opts ...grpc.CallOption) (*UpdateNamespaceResponse, error) {
|
||||||
out := new(UpdateNamespaceResponse)
|
out := new(UpdateNamespaceResponse)
|
||||||
err := grpc.Invoke(ctx, "/containerd.v1.namespaces.Namespaces/Update", in, out, c.cc, opts...)
|
err := grpc.Invoke(ctx, "/containerd.services.namespaces.v1.Namespaces/Update", in, out, c.cc, opts...)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
@ -228,7 +228,7 @@ func (c *namespacesClient) Update(ctx context.Context, in *UpdateNamespaceReques
|
|||||||
|
|
||||||
func (c *namespacesClient) Delete(ctx context.Context, in *DeleteNamespaceRequest, opts ...grpc.CallOption) (*google_protobuf1.Empty, error) {
|
func (c *namespacesClient) Delete(ctx context.Context, in *DeleteNamespaceRequest, opts ...grpc.CallOption) (*google_protobuf1.Empty, error) {
|
||||||
out := new(google_protobuf1.Empty)
|
out := new(google_protobuf1.Empty)
|
||||||
err := grpc.Invoke(ctx, "/containerd.v1.namespaces.Namespaces/Delete", in, out, c.cc, opts...)
|
err := grpc.Invoke(ctx, "/containerd.services.namespaces.v1.Namespaces/Delete", in, out, c.cc, opts...)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
@ -259,7 +259,7 @@ func _Namespaces_Get_Handler(srv interface{}, ctx context.Context, dec func(inte
|
|||||||
}
|
}
|
||||||
info := &grpc.UnaryServerInfo{
|
info := &grpc.UnaryServerInfo{
|
||||||
Server: srv,
|
Server: srv,
|
||||||
FullMethod: "/containerd.v1.namespaces.Namespaces/Get",
|
FullMethod: "/containerd.services.namespaces.v1.Namespaces/Get",
|
||||||
}
|
}
|
||||||
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
|
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
|
||||||
return srv.(NamespacesServer).Get(ctx, req.(*GetNamespaceRequest))
|
return srv.(NamespacesServer).Get(ctx, req.(*GetNamespaceRequest))
|
||||||
@@ -277,7 +277,7 @@ func _Namespaces_List_Handler(srv interface{}, ctx context.Context, dec func(int
 	}
 	info := &grpc.UnaryServerInfo{
 		Server:     srv,
-		FullMethod: "/containerd.v1.namespaces.Namespaces/List",
+		FullMethod: "/containerd.services.namespaces.v1.Namespaces/List",
 	}
 	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
 		return srv.(NamespacesServer).List(ctx, req.(*ListNamespacesRequest))
@@ -295,7 +295,7 @@ func _Namespaces_Create_Handler(srv interface{}, ctx context.Context, dec func(i
 	}
 	info := &grpc.UnaryServerInfo{
 		Server:     srv,
-		FullMethod: "/containerd.v1.namespaces.Namespaces/Create",
+		FullMethod: "/containerd.services.namespaces.v1.Namespaces/Create",
 	}
 	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
 		return srv.(NamespacesServer).Create(ctx, req.(*CreateNamespaceRequest))
@@ -313,7 +313,7 @@ func _Namespaces_Update_Handler(srv interface{}, ctx context.Context, dec func(i
 	}
 	info := &grpc.UnaryServerInfo{
 		Server:     srv,
-		FullMethod: "/containerd.v1.namespaces.Namespaces/Update",
+		FullMethod: "/containerd.services.namespaces.v1.Namespaces/Update",
 	}
 	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
 		return srv.(NamespacesServer).Update(ctx, req.(*UpdateNamespaceRequest))
@@ -331,7 +331,7 @@ func _Namespaces_Delete_Handler(srv interface{}, ctx context.Context, dec func(i
 	}
 	info := &grpc.UnaryServerInfo{
 		Server:     srv,
-		FullMethod: "/containerd.v1.namespaces.Namespaces/Delete",
+		FullMethod: "/containerd.services.namespaces.v1.Namespaces/Delete",
 	}
 	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
 		return srv.(NamespacesServer).Delete(ctx, req.(*DeleteNamespaceRequest))
@@ -340,7 +340,7 @@ func _Namespaces_Delete_Handler(srv interface{}, ctx context.Context, dec func(i
 }
 
 var _Namespaces_serviceDesc = grpc.ServiceDesc{
-	ServiceName: "containerd.v1.namespaces.Namespaces",
+	ServiceName: "containerd.services.namespaces.v1.Namespaces",
 	HandlerType: (*NamespacesServer)(nil),
 	Methods: []grpc.MethodDesc{
 		{
@@ -365,7 +365,7 @@ var _Namespaces_serviceDesc = grpc.ServiceDesc{
 		},
 	},
 	Streams:  []grpc.StreamDesc{},
-	Metadata: "github.com/containerd/containerd/api/services/namespaces/namespace.proto",
+	Metadata: "github.com/containerd/containerd/api/services/namespaces/v1/namespace.proto",
 }
 
 func (m *Namespace) Marshal() (dAtA []byte, err error) {
@@ -1967,42 +1967,44 @@ var (
 )
 
 func init() {
-	proto.RegisterFile("github.com/containerd/containerd/api/services/namespaces/namespace.proto", fileDescriptorNamespace)
+	proto.RegisterFile("github.com/containerd/containerd/api/services/namespaces/v1/namespace.proto", fileDescriptorNamespace)
 }
 
 var fileDescriptorNamespace = []byte{
-	// 528 bytes of a gzipped FileDescriptorProto
+	// 547 bytes of a gzipped FileDescriptorProto
 	// (gzipped descriptor bytes regenerated for the new proto path; raw hex not reproduced here)
 }
@@ -1,11 +1,13 @@
 syntax = "proto3";
 
-package containerd.v1.namespaces;
+package containerd.services.namespaces.v1;
 
 import "gogoproto/gogo.proto";
 import "google/protobuf/empty.proto";
 import "google/protobuf/field_mask.proto";
 
+option go_package = "github.com/containerd/containerd/api/services/namespaces/v1;namespaces";
+
 // Namespaces provides the ability to manipulate containerd namespaces.
 //
 // All objects in the system are required to be a member of a namespace. If a
@@ -44,7 +46,7 @@ message GetNamespaceResponse {
 }
 
 message ListNamespacesRequest {
-	string filter = 1; // TODO(stevvooe): Define a filtering syntax to make these queries.
+	string filter = 1;
 }
 
 message ListNamespacesResponse {
81	vendor/github.com/containerd/containerd/api/services/snapshot/snapshots.proto	generated	vendored
@@ -1,81 +0,0 @@
-syntax = "proto3";
-
-package containerd.v1.snapshot;
-
-import "gogoproto/gogo.proto";
-import "google/protobuf/empty.proto";
-import "github.com/containerd/containerd/api/types/mount/mount.proto";
-
-// Snapshot service manages snapshots
-service Snapshot {
-	rpc Prepare(PrepareRequest) returns (MountsResponse);
-	rpc View(PrepareRequest) returns (MountsResponse);
-	rpc Mounts(MountsRequest) returns (MountsResponse);
-	rpc Commit(CommitRequest) returns (google.protobuf.Empty);
-	rpc Remove(RemoveRequest) returns (google.protobuf.Empty);
-	rpc Stat(StatRequest) returns (StatResponse);
-	rpc List(ListRequest) returns (stream ListResponse);
-	rpc Usage(UsageRequest) returns (UsageResponse);
-	// "Snapshot" prepares a new set of mounts from existing name
-}
-
-message PrepareRequest {
-	string key = 1;
-	string parent = 2;
-}
-
-message MountsRequest {
-	string key = 1;
-}
-
-message MountsResponse {
-	repeated containerd.v1.types.Mount mounts = 1;
-}
-
-message RemoveRequest {
-	string key = 1;
-}
-
-message CommitRequest {
-	string name = 1;
-	string key = 2;
-}
-
-message StatRequest {
-	string key = 1;
-}
-
-enum Kind {
-	option (gogoproto.goproto_enum_prefix) = false;
-	option (gogoproto.enum_customname) = "Kind";
-
-	ACTIVE = 0 [(gogoproto.enumvalue_customname) = "KindActive"];
-
-	COMMITTED = 1 [(gogoproto.enumvalue_customname) = "KindCommitted"];
-}
-
-message Info {
-	string name = 1;
-	string parent = 2;
-	Kind kind = 3;
-	bool readonly = 4;
-}
-
-message StatResponse {
-	Info info = 1 [(gogoproto.nullable) = false];
-}
-
-message ListRequest{}
-
-message ListResponse {
-	repeated Info info = 1 [(gogoproto.nullable) = false];
-}
-
-message UsageRequest {
-	string key = 1;
-}
-
-message UsageResponse {
-	int64 inodes = 2;
-	int64 size = 1;
-}
File diff suppressed because it is too large
104	vendor/github.com/containerd/containerd/api/services/snapshot/v1/snapshots.proto	generated	vendored	Normal file
@@ -0,0 +1,104 @@
+syntax = "proto3";
+
+package containerd.services.snapshots.v1;
+
+import "gogoproto/gogo.proto";
+import "google/protobuf/empty.proto";
+import "github.com/containerd/containerd/api/types/mount.proto";
+
+option go_package = "github.com/containerd/containerd/api/services/snapshot/v1;snapshot";
+
+// Snapshot service manages snapshots
+service Snapshots {
+	rpc Prepare(PrepareSnapshotRequest) returns (PrepareSnapshotResponse);
+	rpc View(ViewSnapshotRequest) returns (ViewSnapshotResponse);
+	rpc Mounts(MountsRequest) returns (MountsResponse);
+	rpc Commit(CommitSnapshotRequest) returns (google.protobuf.Empty);
+	rpc Remove(RemoveSnapshotRequest) returns (google.protobuf.Empty);
+	rpc Stat(StatSnapshotRequest) returns (StatSnapshotResponse);
+	rpc List(ListSnapshotsRequest) returns (stream ListSnapshotsResponse);
+	rpc Usage(UsageRequest) returns (UsageResponse);
+}
+
+message PrepareSnapshotRequest {
+	string snapshotter = 1;
+	string key = 2;
+	string parent = 3;
+}
+
+message PrepareSnapshotResponse {
+	repeated containerd.types.Mount mounts = 1;
+}
+
+message ViewSnapshotRequest {
+	string snapshotter = 1;
+	string key = 2;
+	string parent = 3;
+}
+
+message ViewSnapshotResponse {
+	repeated containerd.types.Mount mounts = 1;
+}
+
+message MountsRequest {
+	string snapshotter = 1;
+	string key = 2;
+}
+
+message MountsResponse {
+	repeated containerd.types.Mount mounts = 1;
+}
+
+message RemoveSnapshotRequest {
+	string snapshotter = 1;
+	string key = 2;
+}
+
+message CommitSnapshotRequest {
+	string snapshotter = 1;
+	string name = 2;
+	string key = 3;
+}
+
+message StatSnapshotRequest {
+	string snapshotter = 1;
+	string key = 2;
+}
+
+enum Kind {
+	option (gogoproto.goproto_enum_prefix) = false;
+	option (gogoproto.enum_customname) = "Kind";
+
+	UNKNOWN = 0 [(gogoproto.enumvalue_customname) = "KindUnknown"];
+	VIEW = 1 [(gogoproto.enumvalue_customname) = "KindView"];
+	ACTIVE = 2 [(gogoproto.enumvalue_customname) = "KindActive"];
+	COMMITTED = 3 [(gogoproto.enumvalue_customname) = "KindCommitted"];
+}
+
+message Info {
+	string name = 1;
+	string parent = 2;
+	Kind kind = 3;
+}
+
+message StatSnapshotResponse {
+	Info info = 1 [(gogoproto.nullable) = false];
+}
+
+message ListSnapshotsRequest{
+	string snapshotter = 1;
+}
+
+message ListSnapshotsResponse {
+	repeated Info info = 1 [(gogoproto.nullable) = false];
+}
+
+message UsageRequest {
+	string snapshotter = 1;
+	string key = 2;
+}
+
+message UsageResponse {
+	int64 size = 1;
+	int64 inodes = 2;
+}
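For orientation only (not part of the commit): the regenerated snapshots API keys every request by an explicit snapshotter name. A minimal sketch of calling the new service directly over gRPC follows; the socket path, the "overlayfs"/"example-key" values, and the NewSnapshotsClient constructor name (ordinary protoc-gen-go output) are assumptions rather than content of this diff.

package main

import (
	"context"
	"log"
	"net"
	"time"

	snapshotapi "github.com/containerd/containerd/api/services/snapshot/v1"
	"google.golang.org/grpc"
)

func main() {
	// Dial containerd's unix socket; the path here is an assumption for this sketch.
	conn, err := grpc.Dial("/run/containerd/containerd.sock",
		grpc.WithInsecure(),
		grpc.WithDialer(func(addr string, timeout time.Duration) (net.Conn, error) {
			return net.DialTimeout("unix", addr, timeout)
		}),
	)
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()

	client := snapshotapi.NewSnapshotsClient(conn)

	// Every request now names the snapshotter explicitly (e.g. "overlayfs").
	resp, err := client.Prepare(context.Background(), &snapshotapi.PrepareSnapshotRequest{
		Snapshotter: "overlayfs",
		Key:         "example-key",
	})
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("prepared %d mounts", len(resp.Mounts))
}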
File diff suppressed because it is too large
180	vendor/github.com/containerd/containerd/api/services/tasks/v1/tasks.proto	generated	vendored	Normal file
@@ -0,0 +1,180 @@
+syntax = "proto3";
+
+package containerd.services.tasks.v1;
+
+import "google/protobuf/empty.proto";
+import "google/protobuf/any.proto";
+import "gogoproto/gogo.proto";
+import "github.com/containerd/containerd/api/types/mount.proto";
+import "github.com/containerd/containerd/api/types/descriptor.proto";
+import "github.com/containerd/containerd/api/types/task/task.proto";
+import "google/protobuf/timestamp.proto";
+
+option go_package = "github.com/containerd/containerd/api/services/tasks/v1;tasks";
+
+service Tasks {
+	// Create a task.
+	rpc Create(CreateTaskRequest) returns (CreateTaskResponse);
+
+	// Start a task.
+	rpc Start(StartTaskRequest) returns (google.protobuf.Empty);
+
+	// Delete a task and on disk state.
+	rpc Delete(DeleteTaskRequest) returns (DeleteResponse);
+
+	rpc DeleteProcess(DeleteProcessRequest) returns (DeleteResponse);
+
+	rpc Get(GetTaskRequest) returns (GetTaskResponse);
+
+	rpc List(ListTasksRequest) returns (ListTasksResponse);
+
+	// Kill a task or process.
+	rpc Kill(KillRequest) returns (google.protobuf.Empty);
+
+	rpc Exec(ExecProcessRequest) returns (ExecProcessResponse);
+
+	rpc ResizePty(ResizePtyRequest) returns (google.protobuf.Empty);
+
+	rpc CloseIO(CloseIORequest) returns (google.protobuf.Empty);
+
+	rpc Pause(PauseTaskRequest) returns (google.protobuf.Empty);
+
+	rpc Resume(ResumeTaskRequest) returns (google.protobuf.Empty);
+
+	rpc ListPids(ListPidsRequest) returns (ListPidsResponse);
+
+	rpc Checkpoint(CheckpointTaskRequest) returns (CheckpointTaskResponse);
+
+	rpc Update(UpdateTaskRequest) returns (google.protobuf.Empty);
+}
+
+message CreateTaskRequest {
+	string container_id = 1;
+
+	// RootFS provides the pre-chroot mounts to perform in the shim before
+	// executing the container task.
+	//
+	// These are for mounts that cannot be performed in the user namespace.
+	// Typically, these mounts should be resolved from snapshots specified on
+	// the container object.
+	repeated containerd.types.Mount rootfs = 3;
+
+	string stdin = 4;
+	string stdout = 5;
+	string stderr = 6;
+	bool terminal = 7;
+
+	containerd.types.Descriptor checkpoint = 8;
+
+	google.protobuf.Any options = 9;
+}
+
+message CreateTaskResponse {
+	string container_id = 1;
+	uint32 pid = 2;
+}
+
+message StartTaskRequest {
+	string container_id = 1;
+}
+
+message DeleteTaskRequest {
+	string container_id = 1;
+}
+
+message DeleteResponse {
+	string id = 1;
+	uint32 pid = 2;
+	uint32 exit_status = 3;
+	google.protobuf.Timestamp exited_at = 4 [(gogoproto.stdtime) = true, (gogoproto.nullable) = false];
+}
+
+message DeleteProcessRequest {
+	string container_id = 1;
+	string exec_id = 2;
+}
+
+message GetTaskRequest {
+	string container_id = 1;
+}
+
+message GetTaskResponse {
+	containerd.v1.types.Task task = 1;
+}
+
+message ListTasksRequest {
+	string filter = 1;
+}
+
+message ListTasksResponse {
+	repeated containerd.v1.types.Task tasks = 1;
+}
+
+message KillRequest {
+	string container_id = 1;
+	string exec_id = 2;
+	uint32 signal = 3;
+	bool all = 4;
+}
+
+message ExecProcessRequest {
+	string container_id = 1;
+	string stdin = 2;
+	string stdout = 3;
+	string stderr = 4;
+	bool terminal = 5;
+	// Spec for starting a process in the target container.
+	//
+	// For runc, this is a process spec, for example.
+	google.protobuf.Any spec = 6;
+	// id of the exec process
+	string exec_id = 7;
+}
+
+message ExecProcessResponse {
+	uint32 pid = 1;
+}
+
+message ResizePtyRequest {
+	string container_id = 1;
+	string exec_id = 2;
+	uint32 width = 3;
+	uint32 height = 4;
+}
+
+message CloseIORequest {
+	string container_id = 1;
+	string exec_id = 2;
+	bool stdin = 3;
+}
+
+message PauseTaskRequest {
+	string container_id = 1;
+}
+
+message ResumeTaskRequest {
+	string container_id = 1;
+}
+
+message ListPidsRequest {
+	string container_id = 1;
+}
+
+message ListPidsResponse{
+	repeated uint32 pids = 1;
+}
+
+message CheckpointTaskRequest {
+	string container_id = 1;
+	string parent_checkpoint = 2 [(gogoproto.customtype) = "github.com/opencontainers/go-digest.Digest", (gogoproto.nullable) = false];
+	google.protobuf.Any options = 3;
+}
+
+message CheckpointTaskResponse {
+	repeated containerd.types.Descriptor descriptors = 1;
+}
+
+message UpdateTaskRequest {
+	string container_id = 1;
+	google.protobuf.Any resources = 2;
+}
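As a side note (not from the commit): the new Tasks service replaces the old execution service, and processes are addressed by a container_id/exec_id pair. A rough sketch of listing tasks through the regenerated client is below; the NewTasksClient constructor and the package alias follow ordinary protoc-gen-go conventions and are assumptions here, as is the already-dialed connection.

package tasksexample

import (
	"context"
	"log"

	tasks "github.com/containerd/containerd/api/services/tasks/v1"
	"google.golang.org/grpc"
)

// listTasks is a sketch: it asks containerd for all known tasks over an
// already-established gRPC connection and logs how many were returned.
func listTasks(ctx context.Context, conn *grpc.ClientConn) error {
	client := tasks.NewTasksClient(conn)
	resp, err := client.List(ctx, &tasks.ListTasksRequest{})
	if err != nil {
		return err
	}
	log.Printf("containerd reported %d tasks", len(resp.Tasks))
	return nil
}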
@ -1,12 +1,12 @@
|
|||||||
// Code generated by protoc-gen-gogo.
|
// Code generated by protoc-gen-gogo.
|
||||||
// source: github.com/containerd/containerd/api/services/version/version.proto
|
// source: github.com/containerd/containerd/api/services/version/v1/version.proto
|
||||||
// DO NOT EDIT!
|
// DO NOT EDIT!
|
||||||
|
|
||||||
/*
|
/*
|
||||||
Package version is a generated protocol buffer package.
|
Package version is a generated protocol buffer package.
|
||||||
|
|
||||||
It is generated from these files:
|
It is generated from these files:
|
||||||
github.com/containerd/containerd/api/services/version/version.proto
|
github.com/containerd/containerd/api/services/version/v1/version.proto
|
||||||
|
|
||||||
It has these top-level messages:
|
It has these top-level messages:
|
||||||
VersionResponse
|
VersionResponse
|
||||||
@ -50,7 +50,7 @@ func (*VersionResponse) ProtoMessage() {}
|
|||||||
func (*VersionResponse) Descriptor() ([]byte, []int) { return fileDescriptorVersion, []int{0} }
|
func (*VersionResponse) Descriptor() ([]byte, []int) { return fileDescriptorVersion, []int{0} }
|
||||||
|
|
||||||
func init() {
|
func init() {
|
||||||
proto.RegisterType((*VersionResponse)(nil), "containerd.v1.VersionResponse")
|
proto.RegisterType((*VersionResponse)(nil), "containerd.services.version.v1.VersionResponse")
|
||||||
}
|
}
|
||||||
|
|
||||||
// Reference imports to suppress errors if they are not otherwise used.
|
// Reference imports to suppress errors if they are not otherwise used.
|
||||||
@ -77,7 +77,7 @@ func NewVersionClient(cc *grpc.ClientConn) VersionClient {
|
|||||||
|
|
||||||
func (c *versionClient) Version(ctx context.Context, in *google_protobuf.Empty, opts ...grpc.CallOption) (*VersionResponse, error) {
|
func (c *versionClient) Version(ctx context.Context, in *google_protobuf.Empty, opts ...grpc.CallOption) (*VersionResponse, error) {
|
||||||
out := new(VersionResponse)
|
out := new(VersionResponse)
|
||||||
err := grpc.Invoke(ctx, "/containerd.v1.Version/Version", in, out, c.cc, opts...)
|
err := grpc.Invoke(ctx, "/containerd.services.version.v1.Version/Version", in, out, c.cc, opts...)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
@ -104,7 +104,7 @@ func _Version_Version_Handler(srv interface{}, ctx context.Context, dec func(int
|
|||||||
}
|
}
|
||||||
info := &grpc.UnaryServerInfo{
|
info := &grpc.UnaryServerInfo{
|
||||||
Server: srv,
|
Server: srv,
|
||||||
FullMethod: "/containerd.v1.Version/Version",
|
FullMethod: "/containerd.services.version.v1.Version/Version",
|
||||||
}
|
}
|
||||||
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
|
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
|
||||||
return srv.(VersionServer).Version(ctx, req.(*google_protobuf.Empty))
|
return srv.(VersionServer).Version(ctx, req.(*google_protobuf.Empty))
|
||||||
@ -113,7 +113,7 @@ func _Version_Version_Handler(srv interface{}, ctx context.Context, dec func(int
|
|||||||
}
|
}
|
||||||
|
|
||||||
var _Version_serviceDesc = grpc.ServiceDesc{
|
var _Version_serviceDesc = grpc.ServiceDesc{
|
||||||
ServiceName: "containerd.v1.Version",
|
ServiceName: "containerd.services.version.v1.Version",
|
||||||
HandlerType: (*VersionServer)(nil),
|
HandlerType: (*VersionServer)(nil),
|
||||||
Methods: []grpc.MethodDesc{
|
Methods: []grpc.MethodDesc{
|
||||||
{
|
{
|
||||||
@ -122,7 +122,7 @@ var _Version_serviceDesc = grpc.ServiceDesc{
|
|||||||
},
|
},
|
||||||
},
|
},
|
||||||
Streams: []grpc.StreamDesc{},
|
Streams: []grpc.StreamDesc{},
|
||||||
Metadata: "github.com/containerd/containerd/api/services/version/version.proto",
|
Metadata: "github.com/containerd/containerd/api/services/version/v1/version.proto",
|
||||||
}
|
}
|
||||||
|
|
||||||
func (m *VersionResponse) Marshal() (dAtA []byte, err error) {
|
func (m *VersionResponse) Marshal() (dAtA []byte, err error) {
|
||||||
@ -442,24 +442,25 @@ var (
|
|||||||
)
|
)
|
||||||
|
|
||||||
func init() {
|
func init() {
|
||||||
proto.RegisterFile("github.com/containerd/containerd/api/services/version/version.proto", fileDescriptorVersion)
|
proto.RegisterFile("github.com/containerd/containerd/api/services/version/v1/version.proto", fileDescriptorVersion)
|
||||||
}
|
}
|
||||||
|
|
||||||
var fileDescriptorVersion = []byte{
|
var fileDescriptorVersion = []byte{
|
||||||
// 225 bytes of a gzipped FileDescriptorProto
|
// 241 bytes of a gzipped FileDescriptorProto
|
||||||
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x72, 0x4e, 0xcf, 0x2c, 0xc9,
|
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x72, 0x4b, 0xcf, 0x2c, 0xc9,
|
||||||
0x28, 0x4d, 0xd2, 0x4b, 0xce, 0xcf, 0xd5, 0x4f, 0xce, 0xcf, 0x2b, 0x49, 0xcc, 0xcc, 0x4b, 0x2d,
|
0x28, 0x4d, 0xd2, 0x4b, 0xce, 0xcf, 0xd5, 0x4f, 0xce, 0xcf, 0x2b, 0x49, 0xcc, 0xcc, 0x4b, 0x2d,
|
||||||
0x4a, 0x41, 0x66, 0x26, 0x16, 0x64, 0xea, 0x17, 0xa7, 0x16, 0x95, 0x65, 0x26, 0xa7, 0x16, 0xeb,
|
0x4a, 0x41, 0x66, 0x26, 0x16, 0x64, 0xea, 0x17, 0xa7, 0x16, 0x95, 0x65, 0x26, 0xa7, 0x16, 0xeb,
|
||||||
0x97, 0xa5, 0x16, 0x15, 0x67, 0xe6, 0xe7, 0xc1, 0x68, 0xbd, 0x82, 0xa2, 0xfc, 0x92, 0x7c, 0x21,
|
0x97, 0xa5, 0x16, 0x15, 0x67, 0xe6, 0xe7, 0xe9, 0x97, 0x19, 0xc2, 0x98, 0x7a, 0x05, 0x45, 0xf9,
|
||||||
0x5e, 0x84, 0x72, 0xbd, 0x32, 0x43, 0x29, 0xe9, 0xf4, 0xfc, 0xfc, 0xf4, 0x9c, 0x54, 0x7d, 0xb0,
|
0x25, 0xf9, 0x42, 0x72, 0x08, 0x1d, 0x7a, 0x30, 0xd5, 0x7a, 0x30, 0x25, 0x65, 0x86, 0x52, 0xd2,
|
||||||
0x64, 0x52, 0x69, 0x9a, 0x7e, 0x6a, 0x6e, 0x41, 0x49, 0x25, 0x44, 0xad, 0x94, 0x48, 0x7a, 0x7e,
|
0xe9, 0xf9, 0xf9, 0xe9, 0x39, 0xa9, 0xfa, 0x60, 0xd5, 0x49, 0xa5, 0x69, 0xfa, 0xa9, 0xb9, 0x05,
|
||||||
0x7a, 0x3e, 0x98, 0xa9, 0x0f, 0x62, 0x41, 0x44, 0x95, 0xdc, 0xb9, 0xf8, 0xc3, 0x20, 0x46, 0x06,
|
0x25, 0x95, 0x10, 0xcd, 0x52, 0x22, 0xe9, 0xf9, 0xe9, 0xf9, 0x60, 0xa6, 0x3e, 0x88, 0x05, 0x11,
|
||||||
0xa5, 0x16, 0x17, 0xe4, 0xe7, 0x15, 0xa7, 0x0a, 0x49, 0x70, 0xb1, 0x43, 0x6d, 0x91, 0x60, 0x54,
|
0x55, 0x72, 0xe7, 0xe2, 0x0f, 0x83, 0x18, 0x10, 0x94, 0x5a, 0x5c, 0x90, 0x9f, 0x57, 0x9c, 0x2a,
|
||||||
0x60, 0xd4, 0xe0, 0x0c, 0x82, 0x71, 0x85, 0xa4, 0xb8, 0x38, 0x8a, 0x52, 0xcb, 0x32, 0xc1, 0x52,
|
0x24, 0xc1, 0xc5, 0x0e, 0x35, 0x53, 0x82, 0x51, 0x81, 0x51, 0x83, 0x33, 0x08, 0xc6, 0x15, 0x92,
|
||||||
0x4c, 0x60, 0x29, 0x38, 0xdf, 0xc8, 0x87, 0x8b, 0x1d, 0x6a, 0x90, 0x90, 0x23, 0x82, 0x29, 0xa6,
|
0xe2, 0xe2, 0x28, 0x4a, 0x2d, 0xcb, 0x04, 0x4b, 0x31, 0x81, 0xa5, 0xe0, 0x7c, 0xa3, 0x58, 0x2e,
|
||||||
0x07, 0x71, 0x92, 0x1e, 0xcc, 0x49, 0x7a, 0xae, 0x20, 0x27, 0x49, 0xc9, 0xe9, 0xa1, 0xb8, 0x5c,
|
0x76, 0xa8, 0x41, 0x42, 0x41, 0x08, 0xa6, 0x98, 0x1e, 0xc4, 0x49, 0x7a, 0x30, 0x27, 0xe9, 0xb9,
|
||||||
0x0f, 0xcd, 0x0d, 0x4e, 0x12, 0x27, 0x1e, 0xca, 0x31, 0xdc, 0x78, 0x28, 0xc7, 0xd0, 0xf0, 0x48,
|
0x82, 0x9c, 0x24, 0xa5, 0xaf, 0x87, 0xdf, 0x2b, 0x7a, 0x68, 0x8e, 0x72, 0x8a, 0x3a, 0xf1, 0x50,
|
||||||
0x8e, 0xf1, 0xc4, 0x23, 0x39, 0xc6, 0x0b, 0x8f, 0xe4, 0x18, 0x1f, 0x3c, 0x92, 0x63, 0x4c, 0x62,
|
0x8e, 0xe1, 0xc6, 0x43, 0x39, 0x86, 0x86, 0x47, 0x72, 0x8c, 0x27, 0x1e, 0xc9, 0x31, 0x5e, 0x78,
|
||||||
0x03, 0x9b, 0x64, 0x0c, 0x08, 0x00, 0x00, 0xff, 0xff, 0xb4, 0x27, 0xa4, 0xd8, 0x40, 0x01, 0x00,
|
0x24, 0xc7, 0xf8, 0xe0, 0x91, 0x1c, 0x63, 0x94, 0x03, 0xb9, 0x81, 0x6b, 0x0d, 0x65, 0x26, 0xb1,
|
||||||
|
0x81, 0x1d, 0x67, 0x0c, 0x08, 0x00, 0x00, 0xff, 0xff, 0xb6, 0x37, 0xd8, 0xc6, 0xa7, 0x01, 0x00,
|
||||||
0x00,
|
0x00,
|
||||||
}
|
}
|
@@ -1,10 +1,13 @@
 syntax = "proto3";
 
-package containerd.v1;
+package containerd.services.version.v1;
 
 import "google/protobuf/empty.proto";
 import "gogoproto/gogo.proto";
 
+// TODO(stevvooe): Should version service actually be versioned?
+option go_package = "github.com/containerd/containerd/api/services/version/v1;version";
+
 service Version {
 	rpc Version(google.protobuf.Empty) returns (VersionResponse);
 }
@ -1,17 +1,19 @@
|
|||||||
// Code generated by protoc-gen-gogo.
|
// Code generated by protoc-gen-gogo.
|
||||||
// source: github.com/containerd/containerd/api/types/descriptor/descriptor.proto
|
// source: github.com/containerd/containerd/api/types/descriptor.proto
|
||||||
// DO NOT EDIT!
|
// DO NOT EDIT!
|
||||||
|
|
||||||
/*
|
/*
|
||||||
Package descriptor is a generated protocol buffer package.
|
Package types is a generated protocol buffer package.
|
||||||
|
|
||||||
It is generated from these files:
|
It is generated from these files:
|
||||||
github.com/containerd/containerd/api/types/descriptor/descriptor.proto
|
github.com/containerd/containerd/api/types/descriptor.proto
|
||||||
|
github.com/containerd/containerd/api/types/mount.proto
|
||||||
|
|
||||||
It has these top-level messages:
|
It has these top-level messages:
|
||||||
Descriptor
|
Descriptor
|
||||||
|
Mount
|
||||||
*/
|
*/
|
||||||
package descriptor
|
package types
|
||||||
|
|
||||||
import proto "github.com/gogo/protobuf/proto"
|
import proto "github.com/gogo/protobuf/proto"
|
||||||
import fmt "fmt"
|
import fmt "fmt"
|
||||||
@ -52,7 +54,7 @@ func (*Descriptor) ProtoMessage() {}
|
|||||||
func (*Descriptor) Descriptor() ([]byte, []int) { return fileDescriptorDescriptor, []int{0} }
|
func (*Descriptor) Descriptor() ([]byte, []int) { return fileDescriptorDescriptor, []int{0} }
|
||||||
|
|
||||||
func init() {
|
func init() {
|
||||||
proto.RegisterType((*Descriptor)(nil), "containerd.v1.types.Descriptor")
|
proto.RegisterType((*Descriptor)(nil), "containerd.types.Descriptor")
|
||||||
}
|
}
|
||||||
func (m *Descriptor) Marshal() (dAtA []byte, err error) {
|
func (m *Descriptor) Marshal() (dAtA []byte, err error) {
|
||||||
size := m.Size()
|
size := m.Size()
|
||||||
@ -399,24 +401,24 @@ var (
|
|||||||
)
|
)
|
||||||
|
|
||||||
func init() {
|
func init() {
|
||||||
proto.RegisterFile("github.com/containerd/containerd/api/types/descriptor/descriptor.proto", fileDescriptorDescriptor)
|
proto.RegisterFile("github.com/containerd/containerd/api/types/descriptor.proto", fileDescriptorDescriptor)
|
||||||
}
|
}
|
||||||
|
|
||||||
var fileDescriptorDescriptor = []byte{
|
var fileDescriptorDescriptor = []byte{
|
||||||
// 229 bytes of a gzipped FileDescriptorProto
|
// 232 bytes of a gzipped FileDescriptorProto
|
||||||
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x72, 0x4b, 0xcf, 0x2c, 0xc9,
|
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0xb2, 0x4e, 0xcf, 0x2c, 0xc9,
|
||||||
0x28, 0x4d, 0xd2, 0x4b, 0xce, 0xcf, 0xd5, 0x4f, 0xce, 0xcf, 0x2b, 0x49, 0xcc, 0xcc, 0x4b, 0x2d,
|
0x28, 0x4d, 0xd2, 0x4b, 0xce, 0xcf, 0xd5, 0x4f, 0xce, 0xcf, 0x2b, 0x49, 0xcc, 0xcc, 0x4b, 0x2d,
|
||||||
0x4a, 0x41, 0x66, 0x26, 0x16, 0x64, 0xea, 0x97, 0x54, 0x16, 0xa4, 0x16, 0xeb, 0xa7, 0xa4, 0x16,
|
0x4a, 0x41, 0x66, 0x26, 0x16, 0x64, 0xea, 0x97, 0x54, 0x16, 0xa4, 0x16, 0xeb, 0xa7, 0xa4, 0x16,
|
||||||
0x27, 0x17, 0x65, 0x16, 0x94, 0xe4, 0x17, 0x21, 0x31, 0xf5, 0x0a, 0x8a, 0xf2, 0x4b, 0xf2, 0x85,
|
0x27, 0x17, 0x65, 0x16, 0x94, 0xe4, 0x17, 0xe9, 0x15, 0x14, 0xe5, 0x97, 0xe4, 0x0b, 0x09, 0x20,
|
||||||
0x84, 0x11, 0x3a, 0xf4, 0xca, 0x0c, 0xf5, 0xc0, 0x1a, 0xa4, 0x44, 0xd2, 0xf3, 0xd3, 0xf3, 0xc1,
|
0x94, 0xe9, 0x81, 0x95, 0x48, 0x89, 0xa4, 0xe7, 0xa7, 0xe7, 0x83, 0x25, 0xf5, 0x41, 0x2c, 0x88,
|
||||||
0xf2, 0xfa, 0x20, 0x16, 0x44, 0xa9, 0x52, 0x37, 0x23, 0x17, 0x97, 0x0b, 0x5c, 0xbf, 0x90, 0x2c,
|
0x3a, 0xa5, 0x6e, 0x46, 0x2e, 0x2e, 0x17, 0xb8, 0x66, 0x21, 0x59, 0x2e, 0xae, 0xdc, 0xd4, 0x94,
|
||||||
0x17, 0x57, 0x6e, 0x6a, 0x4a, 0x66, 0x62, 0x3c, 0x48, 0x8f, 0x04, 0xa3, 0x02, 0xa3, 0x06, 0x67,
|
0xcc, 0xc4, 0x78, 0x90, 0x1e, 0x09, 0x46, 0x05, 0x46, 0x0d, 0xce, 0x20, 0x4e, 0xb0, 0x48, 0x48,
|
||||||
0x10, 0x27, 0x58, 0x24, 0xa4, 0xb2, 0x20, 0x55, 0xc8, 0x8b, 0x8b, 0x2d, 0x25, 0x33, 0x3d, 0xb5,
|
0x65, 0x41, 0xaa, 0x90, 0x17, 0x17, 0x5b, 0x4a, 0x66, 0x7a, 0x6a, 0x71, 0x89, 0x04, 0x13, 0x48,
|
||||||
0xb8, 0x44, 0x82, 0x09, 0x24, 0xe5, 0x64, 0x74, 0xe2, 0x9e, 0x3c, 0xc3, 0xad, 0x7b, 0xf2, 0x5a,
|
0xca, 0xc9, 0xe8, 0xc4, 0x3d, 0x79, 0x86, 0x5b, 0xf7, 0xe4, 0xb5, 0x90, 0x9c, 0x9a, 0x5f, 0x90,
|
||||||
0x48, 0x0e, 0xcf, 0x2f, 0x48, 0xcd, 0x83, 0xdb, 0x5f, 0xac, 0x9f, 0x9e, 0xaf, 0x0b, 0xd1, 0xa2,
|
0x9a, 0x07, 0xb7, 0xbc, 0x58, 0x3f, 0x3d, 0x5f, 0x17, 0xa2, 0x45, 0xcf, 0x05, 0x4c, 0x05, 0x41,
|
||||||
0xe7, 0x02, 0xa6, 0x82, 0xa0, 0x26, 0x08, 0x09, 0x71, 0xb1, 0x14, 0x67, 0x56, 0xa5, 0x4a, 0x30,
|
0x4d, 0x10, 0x12, 0xe2, 0x62, 0x29, 0xce, 0xac, 0x4a, 0x95, 0x60, 0x56, 0x60, 0xd4, 0x60, 0x0e,
|
||||||
0x2b, 0x30, 0x6a, 0x30, 0x07, 0x81, 0xd9, 0x4e, 0x12, 0x27, 0x1e, 0xca, 0x31, 0xdc, 0x78, 0x28,
|
0x02, 0xb3, 0x9d, 0xbc, 0x4e, 0x3c, 0x94, 0x63, 0xb8, 0xf1, 0x50, 0x8e, 0xa1, 0xe1, 0x91, 0x1c,
|
||||||
0xc7, 0xd0, 0xf0, 0x48, 0x8e, 0xf1, 0xc4, 0x23, 0x39, 0xc6, 0x0b, 0x8f, 0xe4, 0x18, 0x1f, 0x3c,
|
0xe3, 0x89, 0x47, 0x72, 0x8c, 0x17, 0x1e, 0xc9, 0x31, 0x3e, 0x78, 0x24, 0xc7, 0x18, 0x65, 0x40,
|
||||||
0x92, 0x63, 0x4c, 0x62, 0x03, 0x3b, 0xd7, 0x18, 0x10, 0x00, 0x00, 0xff, 0xff, 0x45, 0x60, 0xfd,
|
0x7c, 0x60, 0x58, 0x83, 0xc9, 0x24, 0x36, 0xb0, 0x07, 0x8d, 0x01, 0x01, 0x00, 0x00, 0xff, 0xff,
|
||||||
0x5b, 0x23, 0x01, 0x00, 0x00,
|
0x23, 0x14, 0xc9, 0x7c, 0x47, 0x01, 0x00, 0x00,
|
||||||
}
|
}
|
@@ -1,9 +1,11 @@
 syntax = "proto3";
 
-package containerd.v1.types;
+package containerd.types;
 
 import "gogoproto/gogo.proto";
 
+option go_package = "github.com/containerd/containerd/api/types;types";
+
 // Descriptor describes a blob in a content store.
 //
 // This descriptor can be used to reference content from an
1	vendor/github.com/containerd/containerd/api/types/doc.go	generated	vendored	Normal file
@@ -0,0 +1 @@
+package types
@ -1,17 +1,8 @@
|
|||||||
// Code generated by protoc-gen-gogo.
|
// Code generated by protoc-gen-gogo.
|
||||||
// source: github.com/containerd/containerd/api/types/mount/mount.proto
|
// source: github.com/containerd/containerd/api/types/mount.proto
|
||||||
// DO NOT EDIT!
|
// DO NOT EDIT!
|
||||||
|
|
||||||
/*
|
package types
|
||||||
Package mount is a generated protocol buffer package.
|
|
||||||
|
|
||||||
It is generated from these files:
|
|
||||||
github.com/containerd/containerd/api/types/mount/mount.proto
|
|
||||||
|
|
||||||
It has these top-level messages:
|
|
||||||
Mount
|
|
||||||
*/
|
|
||||||
package mount
|
|
||||||
|
|
||||||
import proto "github.com/gogo/protobuf/proto"
|
import proto "github.com/gogo/protobuf/proto"
|
||||||
import fmt "fmt"
|
import fmt "fmt"
|
||||||
@ -28,12 +19,6 @@ var _ = proto.Marshal
|
|||||||
var _ = fmt.Errorf
|
var _ = fmt.Errorf
|
||||||
var _ = math.Inf
|
var _ = math.Inf
|
||||||
|
|
||||||
// This is a compile-time assertion to ensure that this generated file
|
|
||||||
// is compatible with the proto package it is being compiled against.
|
|
||||||
// A compilation error at this line likely means your copy of the
|
|
||||||
// proto package needs to be updated.
|
|
||||||
const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package
|
|
||||||
|
|
||||||
// Mount describes mounts for a container.
|
// Mount describes mounts for a container.
|
||||||
//
|
//
|
||||||
// This type is the lingua franca of ContainerD. All services provide mounts
|
// This type is the lingua franca of ContainerD. All services provide mounts
|
||||||
@ -58,7 +43,7 @@ func (*Mount) ProtoMessage() {}
|
|||||||
func (*Mount) Descriptor() ([]byte, []int) { return fileDescriptorMount, []int{0} }
|
func (*Mount) Descriptor() ([]byte, []int) { return fileDescriptorMount, []int{0} }
|
||||||
|
|
||||||
func init() {
|
func init() {
|
||||||
proto.RegisterType((*Mount)(nil), "containerd.v1.types.Mount")
|
proto.RegisterType((*Mount)(nil), "containerd.types.Mount")
|
||||||
}
|
}
|
||||||
func (m *Mount) Marshal() (dAtA []byte, err error) {
|
func (m *Mount) Marshal() (dAtA []byte, err error) {
|
||||||
size := m.Size()
|
size := m.Size()
|
||||||
@ -468,22 +453,22 @@ var (
|
|||||||
)
|
)
|
||||||
|
|
||||||
func init() {
|
func init() {
|
||||||
proto.RegisterFile("github.com/containerd/containerd/api/types/mount/mount.proto", fileDescriptorMount)
|
proto.RegisterFile("github.com/containerd/containerd/api/types/mount.proto", fileDescriptorMount)
|
||||||
}
|
}
|
||||||
|
|
||||||
var fileDescriptorMount = []byte{
|
var fileDescriptorMount = []byte{
|
||||||
// 197 bytes of a gzipped FileDescriptorProto
|
// 200 bytes of a gzipped FileDescriptorProto
|
||||||
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0xb2, 0x49, 0xcf, 0x2c, 0xc9,
|
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x32, 0x4b, 0xcf, 0x2c, 0xc9,
|
||||||
0x28, 0x4d, 0xd2, 0x4b, 0xce, 0xcf, 0xd5, 0x4f, 0xce, 0xcf, 0x2b, 0x49, 0xcc, 0xcc, 0x4b, 0x2d,
|
0x28, 0x4d, 0xd2, 0x4b, 0xce, 0xcf, 0xd5, 0x4f, 0xce, 0xcf, 0x2b, 0x49, 0xcc, 0xcc, 0x4b, 0x2d,
|
||||||
0x4a, 0x41, 0x66, 0x26, 0x16, 0x64, 0xea, 0x97, 0x54, 0x16, 0xa4, 0x16, 0xeb, 0xe7, 0xe6, 0x97,
|
0x4a, 0x41, 0x66, 0x26, 0x16, 0x64, 0xea, 0x97, 0x54, 0x16, 0xa4, 0x16, 0xeb, 0xe7, 0xe6, 0x97,
|
||||||
0xe6, 0x95, 0x40, 0x48, 0xbd, 0x82, 0xa2, 0xfc, 0x92, 0x7c, 0x21, 0x61, 0x84, 0x3a, 0xbd, 0x32,
|
0xe6, 0x95, 0xe8, 0x15, 0x14, 0xe5, 0x97, 0xe4, 0x0b, 0x09, 0x20, 0x54, 0xe8, 0x81, 0x65, 0xa5,
|
||||||
0x43, 0x3d, 0xb0, 0x32, 0x29, 0x91, 0xf4, 0xfc, 0xf4, 0x7c, 0xb0, 0xbc, 0x3e, 0x88, 0x05, 0x51,
|
0x44, 0xd2, 0xf3, 0xd3, 0xf3, 0xc1, 0x92, 0xfa, 0x20, 0x16, 0x44, 0x9d, 0x52, 0x2a, 0x17, 0xab,
|
||||||
0xaa, 0x94, 0xca, 0xc5, 0xea, 0x0b, 0xd2, 0x29, 0x24, 0xc4, 0xc5, 0x02, 0x52, 0x27, 0xc1, 0xa8,
|
0x2f, 0x48, 0x9b, 0x90, 0x10, 0x17, 0x0b, 0x48, 0x9d, 0x04, 0xa3, 0x02, 0xa3, 0x06, 0x67, 0x10,
|
||||||
0xc0, 0xa8, 0xc1, 0x19, 0x04, 0x66, 0x0b, 0x89, 0x71, 0xb1, 0x15, 0xe7, 0x97, 0x16, 0x25, 0xa7,
|
0x98, 0x2d, 0x24, 0xc6, 0xc5, 0x56, 0x9c, 0x5f, 0x5a, 0x94, 0x9c, 0x2a, 0xc1, 0x04, 0x16, 0x85,
|
||||||
0x4a, 0x30, 0x81, 0x45, 0xa1, 0x3c, 0x90, 0x78, 0x49, 0x62, 0x51, 0x7a, 0x6a, 0x89, 0x04, 0x33,
|
0xf2, 0x40, 0xe2, 0x25, 0x89, 0x45, 0xe9, 0xa9, 0x25, 0x12, 0xcc, 0x10, 0x71, 0x08, 0x4f, 0x48,
|
||||||
0x44, 0x1c, 0xc2, 0x13, 0x92, 0xe0, 0x62, 0xcf, 0x2f, 0x28, 0xc9, 0xcc, 0xcf, 0x2b, 0x96, 0x60,
|
0x82, 0x8b, 0x3d, 0xbf, 0xa0, 0x24, 0x33, 0x3f, 0xaf, 0x58, 0x82, 0x45, 0x81, 0x59, 0x83, 0x33,
|
||||||
0x51, 0x60, 0xd6, 0xe0, 0x0c, 0x82, 0x71, 0x9d, 0x24, 0x4e, 0x3c, 0x94, 0x63, 0xb8, 0xf1, 0x50,
|
0x08, 0xc6, 0x75, 0xf2, 0x3a, 0xf1, 0x50, 0x8e, 0xe1, 0xc6, 0x43, 0x39, 0x86, 0x86, 0x47, 0x72,
|
||||||
0x8e, 0xa1, 0xe1, 0x91, 0x1c, 0xe3, 0x89, 0x47, 0x72, 0x8c, 0x17, 0x1e, 0xc9, 0x31, 0x3e, 0x78,
|
0x8c, 0x27, 0x1e, 0xc9, 0x31, 0x5e, 0x78, 0x24, 0xc7, 0xf8, 0xe0, 0x91, 0x1c, 0x63, 0x94, 0x01,
|
||||||
0x24, 0xc7, 0x98, 0xc4, 0x06, 0x76, 0x87, 0x31, 0x20, 0x00, 0x00, 0xff, 0xff, 0xbe, 0xda, 0x1c,
|
0xf1, 0x1e, 0xb4, 0x06, 0x93, 0x49, 0x6c, 0x60, 0x97, 0x1b, 0x03, 0x02, 0x00, 0x00, 0xff, 0xff,
|
||||||
0x59, 0xf2, 0x00, 0x00, 0x00,
|
0xe5, 0xc7, 0x07, 0x3f, 0x1b, 0x01, 0x00, 0x00,
|
||||||
}
|
}
|
@@ -1,9 +1,11 @@
 syntax = "proto3";
 
-package containerd.v1.types;
+package containerd.types;
 
 import "gogoproto/gogo.proto";
 
+option go_package = "github.com/containerd/containerd/api/types;types";
+
 // Mount describes mounts for a container.
 //
 // This type is the lingua franca of ContainerD. All services provide mounts
1303	vendor/github.com/containerd/containerd/api/types/task/task.pb.go	generated	vendored
File diff suppressed because it is too large
55	vendor/github.com/containerd/containerd/api/types/task/task.proto	generated	vendored
@@ -4,7 +4,6 @@ package containerd.v1.types;
 
 import "gogoproto/gogo.proto";
 import "google/protobuf/any.proto";
-import "google/protobuf/timestamp.proto";
 
 enum Status {
 	option (gogoproto.goproto_enum_prefix) = false;
@@ -18,52 +17,12 @@ enum Status {
 }
 
 message Task {
-	string id = 1; // TODO(stevvooe): For now, this is just the container id.
-	string container_id = 2;
-	uint32 pid = 3;
-	Status status = 4;
-	google.protobuf.Any spec = 5;
-	string stdin = 6;
-	string stdout = 7;
-	string stderr = 8;
-	bool terminal = 9;
-}
-
-message Process {
-	uint32 pid = 1;
-	repeated string args = 2;
-	repeated string env = 3;
-	User user = 4;
-	string cwd = 5;
-	bool terminal = 6;
-	uint32 exit_status = 7;
-	Status status = 8;
-	google.protobuf.Any runtime_data = 9;
-	string stdin = 10;
-	string stdout = 11;
-	string stderr = 12;
-}
-
-message User {
-	uint32 uid = 1;
-	uint32 gid = 2;
-	repeated uint32 additional_gids = 3;
-}
-
-message Event {
-	string id = 1;
-	enum EventType {
-		EXIT = 0;
-		OOM = 1;
-		CREATE = 2;
-		START = 3;
-		EXEC_ADDED = 4;
-		PAUSED = 5;
-	}
-
-	EventType type = 2;
-	uint32 pid = 3;
-	uint32 exit_status = 4;
-	google.protobuf.Timestamp exited_at = 5 [(gogoproto.stdtime) = true, (gogoproto.nullable) = false];
+	string id = 1;
+	uint32 pid = 2;
+	Status status = 3;
+	google.protobuf.Any spec = 4;
+	string stdin = 5;
+	string stdout = 6;
+	string stderr = 7;
+	bool terminal = 8;
 }
13	vendor/github.com/containerd/containerd/apparmor.go	generated	vendored	Normal file
@@ -0,0 +1,13 @@
+// +build linux
+
+package containerd
+
+import specs "github.com/opencontainers/runtime-spec/specs-go"
+
+// WithApparmor sets the provided apparmor profile to the spec
+func WithApparmorProfile(profile string) SpecOpts {
+	return func(s *specs.Spec) error {
+		s.Process.ApparmorProfile = profile
+		return nil
+	}
+}
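A small usage sketch (not part of the commit) for the new WithApparmorProfile option: SpecOpts are plain functions over an OCI runtime spec, so they can be applied in sequence. The helper name and the "my-profile" value below are illustrative assumptions; the spec itself is expected to come from containerd's own spec helpers.

package apparmorexample

import (
	"github.com/containerd/containerd"
	specs "github.com/opencontainers/runtime-spec/specs-go"
)

// applySpecOpts runs each SpecOpts against the given spec, stopping at the
// first error. With the new option it could be used, for example, as:
//
//	applySpecOpts(spec, containerd.WithApparmorProfile("my-profile"))
func applySpecOpts(s *specs.Spec, opts ...containerd.SpecOpts) error {
	for _, o := range opts {
		if err := o(s); err != nil {
			return err
		}
	}
	return nil
}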
279	vendor/github.com/containerd/containerd/client.go	generated	vendored
@@ -2,23 +2,31 @@ package containerd
 
 import (
 	"context"
+	"fmt"
+	"io"
 	"io/ioutil"
 	"log"
 	"net/http"
 	"runtime"
+	"strconv"
 	"sync"
 	"time"
 
-	"github.com/containerd/containerd/api/services/containers"
+	containersapi "github.com/containerd/containerd/api/services/containers/v1"
-	contentapi "github.com/containerd/containerd/api/services/content"
+	contentapi "github.com/containerd/containerd/api/services/content/v1"
-	diffapi "github.com/containerd/containerd/api/services/diff"
+	diffapi "github.com/containerd/containerd/api/services/diff/v1"
-	"github.com/containerd/containerd/api/services/execution"
+	eventsapi "github.com/containerd/containerd/api/services/events/v1"
-	imagesapi "github.com/containerd/containerd/api/services/images"
+	imagesapi "github.com/containerd/containerd/api/services/images/v1"
-	namespacesapi "github.com/containerd/containerd/api/services/namespaces"
+	namespacesapi "github.com/containerd/containerd/api/services/namespaces/v1"
-	snapshotapi "github.com/containerd/containerd/api/services/snapshot"
+	snapshotapi "github.com/containerd/containerd/api/services/snapshot/v1"
-	versionservice "github.com/containerd/containerd/api/services/version"
+	"github.com/containerd/containerd/api/services/tasks/v1"
+	versionservice "github.com/containerd/containerd/api/services/version/v1"
+	"github.com/containerd/containerd/containers"
 	"github.com/containerd/containerd/content"
+	"github.com/containerd/containerd/errdefs"
 	"github.com/containerd/containerd/images"
+	"github.com/containerd/containerd/plugin"
+	"github.com/containerd/containerd/reference"
 	"github.com/containerd/containerd/remotes"
 	"github.com/containerd/containerd/remotes/docker"
 	"github.com/containerd/containerd/remotes/docker/schema1"
@@ -28,9 +36,11 @@ import (
 	imagesservice "github.com/containerd/containerd/services/images"
 	snapshotservice "github.com/containerd/containerd/services/snapshot"
 	"github.com/containerd/containerd/snapshot"
+	"github.com/containerd/containerd/typeurl"
 	pempty "github.com/golang/protobuf/ptypes/empty"
 	"github.com/opencontainers/image-spec/identity"
 	ocispec "github.com/opencontainers/image-spec/specs-go/v1"
+	specs "github.com/opencontainers/runtime-spec/specs-go"
 	"github.com/pkg/errors"
 	"google.golang.org/grpc"
 	"google.golang.org/grpc/grpclog"
@ -40,10 +50,18 @@ import (
|
|||||||
func init() {
|
func init() {
|
||||||
// reset the grpc logger so that it does not output in the STDIO of the calling process
|
// reset the grpc logger so that it does not output in the STDIO of the calling process
|
||||||
grpclog.SetLogger(log.New(ioutil.Discard, "", log.LstdFlags))
|
grpclog.SetLogger(log.New(ioutil.Discard, "", log.LstdFlags))
|
||||||
|
|
||||||
|
// register TypeUrls for commonly marshaled external types
|
||||||
|
major := strconv.Itoa(specs.VersionMajor)
|
||||||
|
typeurl.Register(&specs.Spec{}, "opencontainers/runtime-spec", major, "Spec")
|
||||||
|
typeurl.Register(&specs.Process{}, "opencontainers/runtime-spec", major, "Process")
|
||||||
|
typeurl.Register(&specs.LinuxResources{}, "opencontainers/runtime-spec", major, "LinuxResources")
|
||||||
|
typeurl.Register(&specs.WindowsResources{}, "opencontainers/runtime-spec", major, "WindowsResources")
|
||||||
}
|
}
|
||||||
|
|
||||||
type clientOpts struct {
|
type clientOpts struct {
|
||||||
defaultns string
|
defaultns string
|
||||||
|
dialOptions []grpc.DialOption
|
||||||
}
|
}
|
||||||
|
|
||||||
type ClientOpt func(c *clientOpts) error
|
type ClientOpt func(c *clientOpts) error
|
||||||
@ -55,6 +73,14 @@ func WithDefaultNamespace(ns string) ClientOpt {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// WithDialOpts allows grpc.DialOptions to be set on the connection
|
||||||
|
func WithDialOpts(opts []grpc.DialOption) ClientOpt {
|
||||||
|
return func(c *clientOpts) error {
|
||||||
|
c.dialOptions = opts
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
// New returns a new containerd client that is connected to the containerd
|
// New returns a new containerd client that is connected to the containerd
|
||||||
// instance provided by address
|
// instance provided by address
|
||||||
func New(address string, opts ...ClientOpt) (*Client, error) {
|
func New(address string, opts ...ClientOpt) (*Client, error) {
|
||||||
@ -64,12 +90,16 @@ func New(address string, opts ...ClientOpt) (*Client, error) {
|
|||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
gopts := []grpc.DialOption{
|
gopts := []grpc.DialOption{
|
||||||
|
grpc.WithBlock(),
|
||||||
grpc.WithInsecure(),
|
grpc.WithInsecure(),
|
||||||
grpc.WithTimeout(100 * time.Second),
|
grpc.WithTimeout(100 * time.Second),
|
||||||
|
grpc.FailOnNonTempDialError(true),
|
||||||
grpc.WithDialer(dialer),
|
grpc.WithDialer(dialer),
|
||||||
}
|
}
|
||||||
|
if len(copts.dialOptions) > 0 {
|
||||||
|
gopts = copts.dialOptions
|
||||||
|
}
|
||||||
if copts.defaultns != "" {
|
if copts.defaultns != "" {
|
||||||
unary, stream := newNSInterceptors(copts.defaultns)
|
unary, stream := newNSInterceptors(copts.defaultns)
|
||||||
gopts = append(gopts,
|
gopts = append(gopts,
|
||||||
@ -77,14 +107,19 @@ func New(address string, opts ...ClientOpt) (*Client, error) {
|
|||||||
grpc.WithStreamInterceptor(stream),
|
grpc.WithStreamInterceptor(stream),
|
||||||
)
|
)
|
||||||
}
|
}
|
||||||
|
|
||||||
conn, err := grpc.Dial(dialAddress(address), gopts...)
|
conn, err := grpc.Dial(dialAddress(address), gopts...)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, errors.Wrapf(err, "failed to dial %q", address)
|
return nil, errors.Wrapf(err, "failed to dial %q", address)
|
||||||
}
|
}
|
||||||
|
return NewWithConn(conn, opts...)
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewWithConn returns a new containerd client that is connected to the containerd
|
||||||
|
// instance provided by the connection
|
||||||
|
func NewWithConn(conn *grpc.ClientConn, opts ...ClientOpt) (*Client, error) {
|
||||||
return &Client{
|
return &Client{
|
||||||
conn: conn,
|
conn: conn,
|
||||||
runtime: runtime.GOOS,
|
runtime: fmt.Sprintf("%s.%s", plugin.RuntimePlugin, runtime.GOOS),
|
||||||
}, nil
|
}, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -106,14 +141,14 @@ func (c *Client) IsServing(ctx context.Context) (bool, error) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// Containers returns all containers created in containerd
|
// Containers returns all containers created in containerd
|
||||||
func (c *Client) Containers(ctx context.Context) ([]Container, error) {
|
func (c *Client) Containers(ctx context.Context, filters ...string) ([]Container, error) {
|
||||||
r, err := c.ContainerService().List(ctx, &containers.ListContainersRequest{})
|
r, err := c.ContainerService().List(ctx, filters...)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
var out []Container
|
var out []Container
|
||||||
for _, container := range r.Containers {
|
for _, container := range r {
|
||||||
out = append(out, containerFromProto(c, container))
|
out = append(out, containerFromRecord(c, container))
|
||||||
}
|
}
|
||||||
return out, nil
|
return out, nil
|
||||||
}
|
}
|
||||||
@@ -128,11 +163,11 @@ func WithContainerLabels(labels map[string]string) NewContainerOpts {
 	}
 }
 
-// WithExistingRootFS uses an existing root filesystem for the container
+// WithSnapshot uses an existing root filesystem for the container
-func WithExistingRootFS(id string) NewContainerOpts {
+func WithSnapshot(id string) NewContainerOpts {
 	return func(ctx context.Context, client *Client, c *containers.Container) error {
 		// check that the snapshot exists, if not, fail on creation
-		if _, err := client.SnapshotService().Mounts(ctx, id); err != nil {
+		if _, err := client.SnapshotService(c.Snapshotter).Mounts(ctx, id); err != nil {
 			return err
 		}
 		c.RootFS = id
@@ -140,41 +175,54 @@ func WithExistingRootFS(id string) NewContainerOpts {
 	}
 }
 
-// WithNewRootFS allocates a new snapshot to be used by the container as the
+// WithNewSnapshot allocates a new snapshot to be used by the container as the
 // root filesystem in read-write mode
-func WithNewRootFS(id string, i Image) NewContainerOpts {
+func WithNewSnapshot(id string, i Image) NewContainerOpts {
 	return func(ctx context.Context, client *Client, c *containers.Container) error {
 		diffIDs, err := i.(*image).i.RootFS(ctx, client.ContentStore())
 		if err != nil {
 			return err
 		}
-		if _, err := client.SnapshotService().Prepare(ctx, id, identity.ChainID(diffIDs).String()); err != nil {
+		if _, err := client.SnapshotService(c.Snapshotter).Prepare(ctx, id, identity.ChainID(diffIDs).String()); err != nil {
 			return err
 		}
 		c.RootFS = id
+		c.Image = i.Name()
 		return nil
 	}
 }
 
-// WithNewReadonlyRootFS allocates a new snapshot to be used by the container as the
+// WithNewSnapshotView allocates a new snapshot to be used by the container as the
 // root filesystem in read-only mode
-func WithNewReadonlyRootFS(id string, i Image) NewContainerOpts {
+func WithNewSnapshotView(id string, i Image) NewContainerOpts {
 	return func(ctx context.Context, client *Client, c *containers.Container) error {
 		diffIDs, err := i.(*image).i.RootFS(ctx, client.ContentStore())
 		if err != nil {
 			return err
 		}
-		if _, err := client.SnapshotService().View(ctx, id, identity.ChainID(diffIDs).String()); err != nil {
+		if _, err := client.SnapshotService(c.Snapshotter).View(ctx, id, identity.ChainID(diffIDs).String()); err != nil {
 			return err
 		}
 		c.RootFS = id
+		c.Image = i.Name()
 		return nil
 	}
 }
 
+// WithRuntime allows a user to specify the runtime name and additional options that should
+// be used to create tasks for the container
 func WithRuntime(name string) NewContainerOpts {
 	return func(ctx context.Context, client *Client, c *containers.Container) error {
-		c.Runtime = name
+		c.Runtime = containers.RuntimeInfo{
+			Name: name,
+		}
+		return nil
+	}
+}
+
+func WithSnapshotter(name string) NewContainerOpts {
+	return func(ctx context.Context, client *Client, c *containers.Container) error {
+		c.Snapshotter = name
 		return nil
 	}
 }
@@ -191,30 +239,28 @@ func WithImage(i Image) NewContainerOpts {
 func (c *Client) NewContainer(ctx context.Context, id string, opts ...NewContainerOpts) (Container, error) {
 	container := containers.Container{
 		ID: id,
-		Runtime: c.runtime,
+		Runtime: containers.RuntimeInfo{
+			Name: c.runtime,
+		},
 	}
 	for _, o := range opts {
 		if err := o(ctx, c, &container); err != nil {
 			return nil, err
 		}
 	}
-	r, err := c.ContainerService().Create(ctx, &containers.CreateContainerRequest{
-		Container: container,
-	})
+	r, err := c.ContainerService().Create(ctx, container)
 	if err != nil {
 		return nil, err
 	}
-	return containerFromProto(c, r.Container), nil
+	return containerFromRecord(c, r), nil
 }
 
 func (c *Client) LoadContainer(ctx context.Context, id string) (Container, error) {
-	response, err := c.ContainerService().Get(ctx, &containers.GetContainerRequest{
-		ID: id,
-	})
+	r, err := c.ContainerService().Get(ctx, id)
 	if err != nil {
 		return nil, err
 	}
-	return containerFromProto(c, response.Container), nil
+	return containerFromRecord(c, r), nil
 }
 
 type RemoteOpts func(*Client, *RemoteContext) error
@@ -231,6 +277,9 @@ type RemoteContext struct {
 	// afterwards. Unpacking is required to run an image.
 	Unpack bool
 
+	// Snapshotter used for unpacking
+	Snapshotter string
+
 	// BaseHandlers are a set of handlers which get are called on dispatch.
 	// These handlers always get called before any operation specific
 	// handlers.
@@ -258,6 +307,14 @@ func WithPullUnpack(client *Client, c *RemoteContext) error {
 	return nil
 }
 
+// WithPullSnapshotter specifies snapshotter name used for unpacking
+func WithPullSnapshotter(snapshotterName string) RemoteOpts {
+	return func(client *Client, c *RemoteContext) error {
+		c.Snapshotter = snapshotterName
+		return nil
+	}
+}
+
 // WithSchema1Conversion is used to convert Docker registry schema 1
 // manifests to oci manifests on pull. Without this option schema 1
 // manifests will return a not supported error.
@@ -324,20 +381,33 @@ func (c *Client) Pull(ctx context.Context, ref string, opts ...RemoteOpts) (Imag
 		}
 	}
 
+	imgrec := images.Image{
+		Name:   name,
+		Target: desc,
+	}
+
 	is := c.ImageService()
-	if err := is.Put(ctx, name, desc); err != nil {
+	if updated, err := is.Update(ctx, imgrec, "target"); err != nil {
+		if !errdefs.IsNotFound(err) {
 			return nil, err
 		}
-	i, err := is.Get(ctx, name)
+		created, err := is.Create(ctx, imgrec)
 		if err != nil {
 			return nil, err
 		}
+
+		imgrec = created
+	} else {
+		imgrec = updated
+	}
+
 	img := &image{
 		client: c,
-		i:      i,
+		i:      imgrec,
 	}
 	if pullCtx.Unpack {
-		if err := img.Unpack(ctx); err != nil {
+		if err := img.Unpack(ctx, pullCtx.Snapshotter); err != nil {
 			return nil, err
 		}
 	}
@@ -433,20 +503,20 @@ func (c *Client) NamespaceService() namespacesapi.NamespacesClient {
 	return namespacesapi.NewNamespacesClient(c.conn)
 }
 
-func (c *Client) ContainerService() containers.ContainersClient {
-	return containers.NewContainersClient(c.conn)
+func (c *Client) ContainerService() containers.Store {
+	return NewRemoteContainerStore(containersapi.NewContainersClient(c.conn))
 }
 
 func (c *Client) ContentStore() content.Store {
 	return contentservice.NewStoreFromClient(contentapi.NewContentClient(c.conn))
 }
 
-func (c *Client) SnapshotService() snapshot.Snapshotter {
-	return snapshotservice.NewSnapshotterFromClient(snapshotapi.NewSnapshotClient(c.conn))
+func (c *Client) SnapshotService(snapshotterName string) snapshot.Snapshotter {
+	return snapshotservice.NewSnapshotterFromClient(snapshotapi.NewSnapshotsClient(c.conn), snapshotterName)
 }
 
-func (c *Client) TaskService() execution.TasksClient {
-	return execution.NewTasksClient(c.conn)
+func (c *Client) TaskService() tasks.TasksClient {
+	return tasks.NewTasksClient(c.conn)
 }
 
 func (c *Client) ImageService() images.Store {
@@ -461,6 +531,10 @@ func (c *Client) HealthService() grpc_health_v1.HealthClient {
 	return grpc_health_v1.NewHealthClient(c.conn)
 }
 
+func (c *Client) EventService() eventsapi.EventsClient {
+	return eventsapi.NewEventsClient(c.conn)
+}
+
 func (c *Client) VersionService() versionservice.VersionClient {
 	return versionservice.NewVersionClient(c.conn)
 }
@@ -480,3 +554,120 @@ func (c *Client) Version(ctx context.Context) (Version, error) {
 		Revision: response.Revision,
 	}, nil
 }
+
+type imageFormat string
+
+const (
+	ociImageFormat imageFormat = "oci"
+)
+
+type importOpts struct {
+	format    imageFormat
+	refObject string
+}
+
+type ImportOpt func(c *importOpts) error
+
+func WithOCIImportFormat() ImportOpt {
+	return func(c *importOpts) error {
+		if c.format != "" {
+			return errors.New("format already set")
+		}
+		c.format = ociImageFormat
+		return nil
+	}
+}
+
+// WithRefObject specifies the ref object to import.
+// If refObject is empty, it is copied from the ref argument of Import().
+func WithRefObject(refObject string) ImportOpt {
+	return func(c *importOpts) error {
+		c.refObject = refObject
+		return nil
+	}
+}
+
+func resolveImportOpt(ref string, opts ...ImportOpt) (importOpts, error) {
+	var iopts importOpts
+	for _, o := range opts {
+		if err := o(&iopts); err != nil {
+			return iopts, err
+		}
+	}
+	// use OCI as the default format
+	if iopts.format == "" {
+		iopts.format = ociImageFormat
+	}
+	// if refObject is not explicitly specified, use the one specified in ref
+	if iopts.refObject == "" {
+		refSpec, err := reference.Parse(ref)
+		if err != nil {
+			return iopts, err
+		}
+		iopts.refObject = refSpec.Object
+	}
+	return iopts, nil
+}
+
+// Import imports an image from a Tar stream using reader.
+// OCI format is assumed by default.
+//
+// Note that unreferenced blobs are imported to the content store as well.
+func (c *Client) Import(ctx context.Context, ref string, reader io.Reader, opts ...ImportOpt) (Image, error) {
+	iopts, err := resolveImportOpt(ref, opts...)
+	if err != nil {
+		return nil, err
+	}
+	switch iopts.format {
+	case ociImageFormat:
+		return c.importFromOCITar(ctx, ref, reader, iopts)
+	default:
+		return nil, errors.Errorf("unsupported format: %s", iopts.format)
+	}
+}
+
+type exportOpts struct {
+	format imageFormat
+}
+
+type ExportOpt func(c *exportOpts) error
+
+func WithOCIExportFormat() ExportOpt {
+	return func(c *exportOpts) error {
+		if c.format != "" {
+			return errors.New("format already set")
+		}
+		c.format = ociImageFormat
+		return nil
+	}
+}
+
+// TODO: add WithMediaTypeTranslation that transforms media types according to the format.
+// e.g. application/vnd.docker.image.rootfs.diff.tar.gzip
+//      -> application/vnd.oci.image.layer.v1.tar+gzip
+
+// Export exports an image to a Tar stream.
+// OCI format is used by default.
+// It is up to caller to put "org.opencontainers.image.ref.name" annotation to desc.
+func (c *Client) Export(ctx context.Context, desc ocispec.Descriptor, opts ...ExportOpt) (io.ReadCloser, error) {
+	var eopts exportOpts
+	for _, o := range opts {
+		if err := o(&eopts); err != nil {
+			return nil, err
+		}
+	}
+	// use OCI as the default format
+	if eopts.format == "" {
+		eopts.format = ociImageFormat
+	}
+	pr, pw := io.Pipe()
+	switch eopts.format {
+	case ociImageFormat:
+		go func() {
+			pw.CloseWithError(c.exportToOCITar(ctx, desc, pw, eopts))
+		}()
+	default:
+		return nil, errors.Errorf("unsupported format: %s", eopts.format)
+	}
+	return pr, nil
+}
175 vendor/github.com/containerd/containerd/container.go (generated, vendored)
@@ -4,34 +4,34 @@ import (
 	"context"
 	"encoding/json"
 	"path/filepath"
+	"strings"
 	"sync"
 
-	"google.golang.org/grpc"
-	"google.golang.org/grpc/codes"
-	"github.com/containerd/containerd/api/services/containers"
-	"github.com/containerd/containerd/api/services/execution"
-	"github.com/containerd/containerd/api/types/mount"
+	"github.com/containerd/containerd/api/services/tasks/v1"
+	"github.com/containerd/containerd/api/types"
+	"github.com/containerd/containerd/containers"
+	"github.com/containerd/containerd/errdefs"
+	"github.com/containerd/containerd/mount"
+	"github.com/containerd/containerd/typeurl"
 	specs "github.com/opencontainers/runtime-spec/specs-go"
 	"github.com/pkg/errors"
 )
 
-var (
-	ErrNoImage       = errors.New("container does not have an image")
-	ErrNoRunningTask = errors.New("no running task")
-)
+type DeleteOpts func(context.Context, *Client, containers.Container) error
 
 type Container interface {
 	ID() string
-	Proto() containers.Container
-	Delete(context.Context) error
+	Info() containers.Container
+	Delete(context.Context, ...DeleteOpts) error
 	NewTask(context.Context, IOCreation, ...NewTaskOpts) (Task, error)
 	Spec() (*specs.Spec, error)
 	Task(context.Context, IOAttach) (Task, error)
 	Image(context.Context) (Image, error)
+	Labels(context.Context) (map[string]string, error)
+	SetLabels(context.Context, map[string]string) (map[string]string, error)
 }
 
-func containerFromProto(client *Client, c containers.Container) *container {
+func containerFromRecord(client *Client, c containers.Container) *container {
 	return &container{
 		client: client,
 		c:      c,
@@ -45,7 +45,6 @@ type container struct {
 
 	client *Client
 	c      containers.Container
-	task   *task
 }
 
 // ID returns the container's unique id
@@ -53,10 +52,53 @@ func (c *container) ID() string {
 	return c.c.ID
 }
 
-func (c *container) Proto() containers.Container {
+func (c *container) Info() containers.Container {
 	return c.c
 }
 
+func (c *container) Labels(ctx context.Context) (map[string]string, error) {
+	r, err := c.client.ContainerService().Get(ctx, c.ID())
+	if err != nil {
+		return nil, err
+	}
+
+	c.c = r
+
+	m := make(map[string]string, len(r.Labels))
+	for k, v := range c.c.Labels {
+		m[k] = v
+	}
+
+	return m, nil
+}
+
+func (c *container) SetLabels(ctx context.Context, labels map[string]string) (map[string]string, error) {
+	container := containers.Container{
+		ID:     c.ID(),
+		Labels: labels,
+	}
+
+	var paths []string
+	// mask off paths so we only muck with the labels encountered in labels.
+	// Labels not in the passed in argument will be left alone.
+	for k := range labels {
+		paths = append(paths, strings.Join([]string{"labels", k}, "."))
+	}
+
+	r, err := c.client.ContainerService().Update(ctx, container, paths...)
+	if err != nil {
+		return nil, err
+	}
+
+	c.c = r // update our local container
+
+	m := make(map[string]string, len(r.Labels))
+	for k, v := range c.c.Labels {
+		m[k] = v
+	}
+	return m, nil
+}
+
 // Spec returns the current OCI specification for the container
 func (c *container) Spec() (*specs.Spec, error) {
 	var s specs.Spec
@@ -66,43 +108,44 @@ func (c *container) Spec() (*specs.Spec, error) {
 	return &s, nil
 }
 
+// WithSnapshotCleanup deletes the rootfs allocated for the container
+func WithSnapshotCleanup(ctx context.Context, client *Client, c containers.Container) error {
+	if c.RootFS != "" {
+		return client.SnapshotService(c.Snapshotter).Remove(ctx, c.RootFS)
+	}
+	return nil
+}
+
 // Delete deletes an existing container
 // an error is returned if the container has running tasks
-func (c *container) Delete(ctx context.Context) (err error) {
-	// TODO: should the client be the one removing resources attached
-	// to the container at the moment before we have GC?
-	if c.c.RootFS != "" {
-		err = c.client.SnapshotService().Remove(ctx, c.c.RootFS)
+func (c *container) Delete(ctx context.Context, opts ...DeleteOpts) (err error) {
+	if _, err := c.Task(ctx, nil); err == nil {
+		return errors.Wrapf(errdefs.ErrFailedPrecondition, "cannot delete running task %v", c.ID())
 	}
-	if _, cerr := c.client.ContainerService().Delete(ctx, &containers.DeleteContainerRequest{
-		ID: c.c.ID,
-	}); err == nil {
+	for _, o := range opts {
+		if err := o(ctx, c.client, c.c); err != nil {
+			return err
+		}
+	}
+
+	if cerr := c.client.ContainerService().Delete(ctx, c.ID()); err == nil {
 		err = cerr
 	}
 	return err
 }
 
 func (c *container) Task(ctx context.Context, attach IOAttach) (Task, error) {
-	c.mu.Lock()
-	defer c.mu.Unlock()
-	if c.task == nil {
-		t, err := c.loadTask(ctx, attach)
-		if err != nil {
-			return nil, err
-		}
-		c.task = t.(*task)
-	}
-	return c.task, nil
+	return c.loadTask(ctx, attach)
 }
 
 // Image returns the image that the container is based on
 func (c *container) Image(ctx context.Context) (Image, error) {
 	if c.c.Image == "" {
-		return nil, ErrNoImage
+		return nil, errors.Wrapf(errdefs.ErrNotFound, "container not created from an image")
 	}
 	i, err := c.client.ImageService().Get(ctx, c.c.Image)
 	if err != nil {
-		return nil, err
+		return nil, errors.Wrapf(err, "failed to get image for container")
 	}
 	return &image{
 		client: c.client,
|
|||||||
}, nil
|
}, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
type NewTaskOpts func(context.Context, *Client, *execution.CreateRequest) error
|
type NewTaskOpts func(context.Context, *Client, *TaskInfo) error
|
||||||
|
|
||||||
|
func WithRootFS(mounts []mount.Mount) NewTaskOpts {
|
||||||
|
return func(ctx context.Context, c *Client, ti *TaskInfo) error {
|
||||||
|
ti.RootFS = mounts
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
func (c *container) NewTask(ctx context.Context, ioCreate IOCreation, opts ...NewTaskOpts) (Task, error) {
|
func (c *container) NewTask(ctx context.Context, ioCreate IOCreation, opts ...NewTaskOpts) (Task, error) {
|
||||||
c.mu.Lock()
|
c.mu.Lock()
|
||||||
defer c.mu.Unlock()
|
defer c.mu.Unlock()
|
||||||
i, err := ioCreate()
|
i, err := ioCreate(c.c.ID)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
request := &execution.CreateRequest{
|
request := &tasks.CreateTaskRequest{
|
||||||
ContainerID: c.c.ID,
|
ContainerID: c.c.ID,
|
||||||
Terminal: i.Terminal,
|
Terminal: i.Terminal,
|
||||||
Stdin: i.Stdin,
|
Stdin: i.Stdin,
|
||||||
@@ -128,31 +178,47 @@ func (c *container) NewTask(ctx context.Context, ioCreate IOCreation, opts ...Ne
 	}
 	if c.c.RootFS != "" {
 		// get the rootfs from the snapshotter and add it to the request
-		mounts, err := c.client.SnapshotService().Mounts(ctx, c.c.RootFS)
+		mounts, err := c.client.SnapshotService(c.c.Snapshotter).Mounts(ctx, c.c.RootFS)
 		if err != nil {
 			return nil, err
 		}
 		for _, m := range mounts {
-			request.Rootfs = append(request.Rootfs, &mount.Mount{
+			request.Rootfs = append(request.Rootfs, &types.Mount{
 				Type:    m.Type,
 				Source:  m.Source,
 				Options: m.Options,
 			})
 		}
 	}
+	var info TaskInfo
 	for _, o := range opts {
-		if err := o(ctx, c.client, request); err != nil {
+		if err := o(ctx, c.client, &info); err != nil {
 			return nil, err
 		}
 	}
+	if info.RootFS != nil {
+		for _, m := range info.RootFS {
+			request.Rootfs = append(request.Rootfs, &types.Mount{
+				Type:    m.Type,
+				Source:  m.Source,
+				Options: m.Options,
+			})
+		}
+	}
+	if info.Options != nil {
+		any, err := typeurl.MarshalAny(info.Options)
+		if err != nil {
+			return nil, err
+		}
+		request.Options = any
+	}
 	t := &task{
 		client:      c.client,
 		io:          i,
-		containerID: c.ID(),
-		pidSync:     make(chan struct{}),
+		id:          c.ID(),
 	}
-	if request.Checkpoint != nil {
+	if info.Checkpoint != nil {
+		request.Checkpoint = info.Checkpoint
 		// we need to defer the create call to start
 		t.deferred = request
 	} else {
@@ -161,26 +227,25 @@ func (c *container) NewTask(ctx context.Context, ioCreate IOCreation, opts ...Ne
 			return nil, err
 		}
 		t.pid = response.Pid
-		close(t.pidSync)
 	}
-	c.task = t
 	return t, nil
 }
 
 func (c *container) loadTask(ctx context.Context, ioAttach IOAttach) (Task, error) {
-	response, err := c.client.TaskService().Info(ctx, &execution.InfoRequest{
+	response, err := c.client.TaskService().Get(ctx, &tasks.GetTaskRequest{
 		ContainerID: c.c.ID,
 	})
 	if err != nil {
-		if grpc.Code(errors.Cause(err)) == codes.NotFound {
-			return nil, ErrNoRunningTask
+		err = errdefs.FromGRPC(err)
+		if errdefs.IsNotFound(err) {
+			return nil, errors.Wrapf(err, "no running task found")
 		}
 		return nil, err
 	}
 	var i *IO
 	if ioAttach != nil {
 		// get the existing fifo paths from the task information stored by the daemon
-		paths := &FifoSet{
+		paths := &FIFOSet{
 			Dir: getFifoDir([]string{
 				response.Task.Stdin,
 				response.Task.Stdout,
@@ -195,18 +260,12 @@ func (c *container) loadTask(ctx context.Context, ioAttach IOAttach) (Task, erro
 			return nil, err
 		}
 	}
-	// create and close a channel on load as we already have the pid
-	// and don't want to block calls to Wait(), etc...
-	ps := make(chan struct{})
-	close(ps)
 	t := &task{
 		client: c.client,
 		io:     i,
-		containerID: response.Task.ContainerID,
+		id:          response.Task.ID,
 		pid:    response.Task.Pid,
-		pidSync: ps,
 	}
-	c.task = t
 	return t, nil
 }

26 vendor/github.com/containerd/containerd/container_unix.go (generated, vendored)
@@ -8,17 +8,16 @@ import (
 	"fmt"
 	"io/ioutil"
 
-	"github.com/containerd/containerd/api/services/containers"
-	"github.com/containerd/containerd/api/services/execution"
-	"github.com/containerd/containerd/api/types/descriptor"
+	"github.com/containerd/containerd/api/types"
+	"github.com/containerd/containerd/containers"
 	"github.com/containerd/containerd/content"
+	"github.com/containerd/containerd/errdefs"
 	"github.com/containerd/containerd/images"
-	"github.com/containerd/containerd/snapshot"
+	"github.com/gogo/protobuf/proto"
 	protobuf "github.com/gogo/protobuf/types"
 	digest "github.com/opencontainers/go-digest"
 	"github.com/opencontainers/image-spec/identity"
 	"github.com/opencontainers/image-spec/specs-go/v1"
-	specs "github.com/opencontainers/runtime-spec/specs-go"
 )
 
 func WithCheckpoint(desc v1.Descriptor, rootfsID string) NewContainerOpts {
@@ -45,8 +44,8 @@ func WithCheckpoint(desc v1.Descriptor, rootfsID string) NewContainerOpts {
 		if err != nil {
 			return err
 		}
-		if _, err := client.SnapshotService().Prepare(ctx, rootfsID, identity.ChainID(diffIDs).String()); err != nil {
-			if !snapshot.IsExist(err) {
+		if _, err := client.SnapshotService(c.Snapshotter).Prepare(ctx, rootfsID, identity.ChainID(diffIDs).String()); err != nil {
+			if !errdefs.IsAlreadyExists(err) {
 				return err
 			}
 		}
@@ -61,15 +60,16 @@ func WithCheckpoint(desc v1.Descriptor, rootfsID string) NewContainerOpts {
 			if err != nil {
 				return err
 			}
-			c.Spec = &protobuf.Any{
-				TypeUrl: specs.Version,
-				Value:   data,
+			var any protobuf.Any
+			if err := proto.Unmarshal(data, &any); err != nil {
+				return err
 			}
+			c.Spec = &any
 		}
 	}
 	if rw != nil {
 		// apply the rw snapshot to the new rw layer
-		mounts, err := client.SnapshotService().Mounts(ctx, rootfsID)
+		mounts, err := client.SnapshotService(c.Snapshotter).Mounts(ctx, rootfsID)
 		if err != nil {
 			return err
 		}
@@ -83,7 +83,7 @@ func WithCheckpoint(desc v1.Descriptor, rootfsID string) NewContainerOpts {
 }
 
 func WithTaskCheckpoint(desc v1.Descriptor) NewTaskOpts {
-	return func(ctx context.Context, c *Client, r *execution.CreateRequest) error {
+	return func(ctx context.Context, c *Client, info *TaskInfo) error {
 		id := desc.Digest
 		index, err := decodeIndex(ctx, c.ContentStore(), id)
 		if err != nil {
@@ -91,7 +91,7 @@ func WithTaskCheckpoint(desc v1.Descriptor) NewTaskOpts {
 		}
 		for _, m := range index.Manifests {
 			if m.MediaType == images.MediaTypeContainerd1Checkpoint {
-				r.Checkpoint = &descriptor.Descriptor{
+				info.Checkpoint = &types.Descriptor{
 					MediaType: m.MediaType,
 					Size_:     m.Size,
 					Digest:    m.Digest,
13 vendor/github.com/containerd/containerd/containerd.service (generated, vendored, new file)
@@ -0,0 +1,13 @@
+[Unit]
+Description=containerd container runtime
+Documentation=https://containerd.io
+After=network.target
+
+[Service]
+ExecStartPre=/sbin/modprobe overlay
+ExecStart=/usr/local/bin/containerd
+Delegate=yes
+KillMode=process
+
+[Install]
+WantedBy=multi-user.target
25 vendor/github.com/containerd/containerd/containers/containers.go (generated, vendored)
@@ -3,6 +3,8 @@ package containers
 import (
 	"context"
 	"time"
+
+	"github.com/gogo/protobuf/types"
 )
 
 // Container represents the set of data pinned by a container. Unless otherwise
@@ -13,17 +15,32 @@ type Container struct {
 	ID     string
 	Labels map[string]string
 	Image  string
-	Runtime string
-	Spec    []byte
+	Runtime RuntimeInfo
+	Spec    *types.Any
 	RootFS string
+	Snapshotter string
 	CreatedAt time.Time
 	UpdatedAt time.Time
 }
 
+type RuntimeInfo struct {
+	Name    string
+	Options *types.Any
+}
+
 type Store interface {
 	Get(ctx context.Context, id string) (Container, error)
-	List(ctx context.Context, filter string) ([]Container, error)
+
+	// List returns containers that match one or more of the provided filters.
+	List(ctx context.Context, filters ...string) ([]Container, error)
+
 	Create(ctx context.Context, container Container) (Container, error)
-	Update(ctx context.Context, container Container) (Container, error)
+
+	// Update the container with the provided container object. ID must be set.
+	//
+	// If one or more fieldpaths are provided, only the field corresponding to
+	// the fieldpaths will be mutated.
+	Update(ctx context.Context, container Container, fieldpaths ...string) (Container, error)
+
 	Delete(ctx context.Context, id string) error
 }
130 vendor/github.com/containerd/containerd/containerstore.go (generated, vendored, new file)
@@ -0,0 +1,130 @@
+package containerd
+
+import (
+	"context"
+
+	containersapi "github.com/containerd/containerd/api/services/containers/v1"
+	"github.com/containerd/containerd/containers"
+	"github.com/containerd/containerd/errdefs"
+	ptypes "github.com/gogo/protobuf/types"
+)
+
+type remoteContainers struct {
+	client containersapi.ContainersClient
+}
+
+var _ containers.Store = &remoteContainers{}
+
+func NewRemoteContainerStore(client containersapi.ContainersClient) containers.Store {
+	return &remoteContainers{
+		client: client,
+	}
+}
+
+func (r *remoteContainers) Get(ctx context.Context, id string) (containers.Container, error) {
+	resp, err := r.client.Get(ctx, &containersapi.GetContainerRequest{
+		ID: id,
+	})
+	if err != nil {
+		return containers.Container{}, errdefs.FromGRPC(err)
+	}
+
+	return containerFromProto(&resp.Container), nil
+}
+
+func (r *remoteContainers) List(ctx context.Context, filters ...string) ([]containers.Container, error) {
+	resp, err := r.client.List(ctx, &containersapi.ListContainersRequest{
+		Filters: filters,
+	})
+	if err != nil {
+		return nil, errdefs.FromGRPC(err)
+	}
+
+	return containersFromProto(resp.Containers), nil
+
+}
+
+func (r *remoteContainers) Create(ctx context.Context, container containers.Container) (containers.Container, error) {
+	created, err := r.client.Create(ctx, &containersapi.CreateContainerRequest{
+		Container: containerToProto(&container),
+	})
+	if err != nil {
+		return containers.Container{}, errdefs.FromGRPC(err)
+	}
+
+	return containerFromProto(&created.Container), nil
+
+}
+
+func (r *remoteContainers) Update(ctx context.Context, container containers.Container, fieldpaths ...string) (containers.Container, error) {
+	var updateMask *ptypes.FieldMask
+	if len(fieldpaths) > 0 {
+		updateMask = &ptypes.FieldMask{
+			Paths: fieldpaths,
+		}
+	}
+
+	updated, err := r.client.Update(ctx, &containersapi.UpdateContainerRequest{
+		Container:  containerToProto(&container),
+		UpdateMask: updateMask,
+	})
+	if err != nil {
+		return containers.Container{}, errdefs.FromGRPC(err)
+	}
+
+	return containerFromProto(&updated.Container), nil
+
+}
+
+func (r *remoteContainers) Delete(ctx context.Context, id string) error {
+	_, err := r.client.Delete(ctx, &containersapi.DeleteContainerRequest{
+		ID: id,
+	})
+
+	return errdefs.FromGRPC(err)
+
+}
+
+func containerToProto(container *containers.Container) containersapi.Container {
+	return containersapi.Container{
+		ID:     container.ID,
+		Labels: container.Labels,
+		Image:  container.Image,
+		Runtime: &containersapi.Container_Runtime{
+			Name:    container.Runtime.Name,
+			Options: container.Runtime.Options,
+		},
+		Spec:        container.Spec,
+		Snapshotter: container.Snapshotter,
+		RootFS:      container.RootFS,
+	}
+}
+
+func containerFromProto(containerpb *containersapi.Container) containers.Container {
+	var runtime containers.RuntimeInfo
+	if containerpb.Runtime != nil {
+		runtime = containers.RuntimeInfo{
+			Name:    containerpb.Runtime.Name,
+			Options: containerpb.Runtime.Options,
+		}
+	}
+	return containers.Container{
+		ID:          containerpb.ID,
+		Labels:      containerpb.Labels,
+		Image:       containerpb.Image,
+		Runtime:     runtime,
+		Spec:        containerpb.Spec,
+		Snapshotter: containerpb.Snapshotter,
+		RootFS:      containerpb.RootFS,
+	}
+}
+
+func containersFromProto(containerspb []containersapi.Container) []containers.Container {
+	var containers []containers.Container
+
+	for _, container := range containerspb {
+		containers = append(containers, containerFromProto(&container))
+	}
+
+	return containers
+}
79 vendor/github.com/containerd/containerd/content/content.go (generated, vendored)
@@ -3,35 +3,10 @@ package content
 import (
 	"context"
 	"io"
-	"sync"
 	"time"
 
+	"github.com/containerd/containerd/oci"
 	"github.com/opencontainers/go-digest"
-	"github.com/pkg/errors"
-)
-
-var (
-	// ErrNotFound is returned when an item is not found.
-	//
-	// Use IsNotFound(err) to detect this condition.
-	ErrNotFound = errors.New("content: not found")
-
-	// ErrExists is returned when something exists when it may not be expected.
-	//
-	// Use IsExists(err) to detect this condition.
-	ErrExists = errors.New("content: exists")
-
-	// ErrLocked is returned when content is actively being uploaded, this
-	// indicates that another process is attempting to upload the same content.
-	//
-	// Use IsLocked(err) to detect this condition.
-	ErrLocked = errors.New("content: locked")
-
-	bufPool = sync.Pool{
-		New: func() interface{} {
-			return make([]byte, 1<<20)
-		},
-	}
 )
 
 type Provider interface {
@@ -48,7 +23,9 @@ type Ingester interface {
 type Info struct {
 	Digest      digest.Digest
 	Size        int64
-	CommittedAt time.Time
+	CreatedAt   time.Time
+	UpdatedAt   time.Time
+	Labels      map[string]string
 }
 
 type Status struct {
@@ -70,32 +47,39 @@ type Manager interface {
 	// If the content is not present, ErrNotFound will be returned.
 	Info(ctx context.Context, dgst digest.Digest) (Info, error)
 
-	// Walk will call fn for each item in the content store.
-	Walk(ctx context.Context, fn WalkFunc) error
+	// Update updates mutable information related to content.
+	// If one or more fieldpaths are provided, only those
+	// fields will be updated.
+	// Mutable fields:
+	//  labels.*
+	Update(ctx context.Context, info Info, fieldpaths ...string) (Info, error)
+
+	// Walk will call fn for each item in the content store which
+	// match the provided filters. If no filters are given all
+	// items will be walked.
+	Walk(ctx context.Context, fn WalkFunc, filters ...string) error
 
 	// Delete removes the content from the store.
 	Delete(ctx context.Context, dgst digest.Digest) error
+}
+
+// IngestManager provides methods for managing ingests.
+type IngestManager interface {
+	// Status returns the status of the provided ref.
+	Status(ctx context.Context, ref string) (Status, error)
 
-	// Status returns the status of any active ingestions whose ref match the
+	// ListStatuses returns the status of any active ingestions whose ref match the
 	// provided regular expression. If empty, all active ingestions will be
 	// returned.
-	//
-	// TODO(stevvooe): Status may be slighly out of place here. If this remains
-	// here, we should remove Manager and just define these on store.
-	Status(ctx context.Context, re string) ([]Status, error)
+	ListStatuses(ctx context.Context, filters ...string) ([]Status, error)
 
 	// Abort completely cancels the ingest operation targeted by ref.
-	//
-	// TODO(stevvooe): Same consideration as above. This should really be
-	// restricted to an ingest management interface.
 	Abort(ctx context.Context, ref string) error
 }
 
 type Writer interface {
-	io.WriteCloser
+	oci.BlobWriter
 	Status() (Status, error)
-	Digest() digest.Digest
-	Commit(size int64, expected digest.Digest) error
 	Truncate(size int64) error
 }
 
@@ -103,18 +87,7 @@ type Writer interface {
 // are commonly provided by complete implementations.
 type Store interface {
 	Manager
-	Ingester
 	Provider
-}
-
-func IsNotFound(err error) bool {
-	return errors.Cause(err) == ErrNotFound
-}
-
-func IsExists(err error) bool {
-	return errors.Cause(err) == ErrExists
-}
-
-func IsLocked(err error) bool {
-	return errors.Cause(err) == ErrLocked
+	IngestManager
+	Ingester
 }
19 vendor/github.com/containerd/containerd/content/helpers.go (generated, vendored)
@@ -5,11 +5,21 @@ import (
 	"fmt"
 	"io"
 	"io/ioutil"
+	"sync"
 
+	"github.com/containerd/containerd/errdefs"
 	"github.com/opencontainers/go-digest"
 	"github.com/pkg/errors"
 )
 
+var (
+	bufPool = sync.Pool{
+		New: func() interface{} {
+			return make([]byte, 1<<20)
+		},
+	}
+)
+
 // ReadBlob retrieves the entire contents of the blob from the provider.
 //
 // Avoid using this for large blobs, such as layers.
@@ -33,7 +43,7 @@ func ReadBlob(ctx context.Context, provider Provider, dgst digest.Digest) ([]byt
 func WriteBlob(ctx context.Context, cs Ingester, ref string, r io.Reader, size int64, expected digest.Digest) error {
 	cw, err := cs.Writer(ctx, ref, size, expected)
 	if err != nil {
-		if !IsExists(err) {
+		if !errdefs.IsAlreadyExists(err) {
 			return err
 		}
 
@@ -79,7 +89,7 @@ func Copy(cw Writer, r io.Reader, size int64, expected digest.Digest) error {
 	}
 
 	if err := cw.Commit(size, expected); err != nil {
-		if !IsExists(err) {
+		if !errdefs.IsAlreadyExists(err) {
 			return errors.Wrapf(err, "failed commit on ref %q", ws.Ref)
 		}
 	}
@@ -120,8 +130,3 @@ func seekReader(r io.Reader, offset, size int64) (io.Reader, error) {
 
 	return r, errors.Wrapf(errUnseekable, "seek to offset %v failed", offset)
 }
-
-func readFileString(path string) (string, error) {
-	p, err := ioutil.ReadFile(path)
-	return string(p), err
-}
37 vendor/github.com/containerd/containerd/content/locks.go (generated, vendored)
@@ -1,37 +0,0 @@
-package content
-
-import (
-	"sync"
-
-	"github.com/pkg/errors"
-)
-
-// Handles locking references
-// TODO: use boltdb for lock status
-
-var (
-	// locks lets us lock in process
-	locks   = map[string]struct{}{}
-	locksMu sync.Mutex
-)
-
-func tryLock(ref string) error {
-	locksMu.Lock()
-	defer locksMu.Unlock()
-
-	if _, ok := locks[ref]; ok {
-		return errors.Wrapf(ErrLocked, "key %s is locked", ref)
-	}
-
-	locks[ref] = struct{}{}
-	return nil
-}
-
-func unlock(ref string) {
-	locksMu.Lock()
-	defer locksMu.Unlock()
-
-	if _, ok := locks[ref]; ok {
-		delete(locks, ref)
-	}
-}
26 vendor/github.com/containerd/containerd/content/readerat.go (generated, vendored)
@@ -1,26 +0,0 @@
-package content
-
-import (
-	"io"
-	"os"
-)
-
-// readerat implements io.ReaderAt in a completely stateless manner by opening
-// the referenced file for each call to ReadAt.
-type readerAt struct {
-	f string
-}
-
-func (ra readerAt) ReadAt(p []byte, offset int64) (int, error) {
-	fp, err := os.Open(ra.f)
-	if err != nil {
-		return 0, err
-	}
-	defer fp.Close()
-
-	if _, err := fp.Seek(offset, io.SeekStart); err != nil {
-		return 0, err
-	}
-
-	return fp.Read(p)
-}
358
vendor/github.com/containerd/containerd/content/store.go
generated
vendored
358
vendor/github.com/containerd/containerd/content/store.go
generated
vendored
@ -1,358 +0,0 @@
|
|||||||
package content
|
|
||||||
|
|
||||||
import (
|
|
||||||
"context"
|
|
||||||
"fmt"
|
|
||||||
"io"
|
|
||||||
"io/ioutil"
|
|
||||||
"os"
|
|
||||||
"path/filepath"
|
|
||||||
"regexp"
|
|
||||||
"strconv"
|
|
||||||
"time"
|
|
||||||
|
|
||||||
"github.com/containerd/containerd/log"
|
|
||||||
digest "github.com/opencontainers/go-digest"
|
|
||||||
"github.com/pkg/errors"
|
|
||||||
)
|
|
||||||
|
|
||||||
// Store is digest-keyed store for content. All data written into the store is
|
|
||||||
// stored under a verifiable digest.
|
|
||||||
//
|
|
||||||
// Store can generally support multi-reader, single-writer ingest of data,
|
|
||||||
// including resumable ingest.
|
|
||||||
type store struct {
|
|
||||||
root string
|
|
||||||
}
|
|
||||||
|
|
||||||
func NewStore(root string) (Store, error) {
|
|
||||||
if err := os.MkdirAll(filepath.Join(root, "ingest"), 0777); err != nil && !os.IsExist(err) {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
return &store{
|
|
||||||
root: root,
|
|
||||||
}, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (s *store) Info(ctx context.Context, dgst digest.Digest) (Info, error) {
|
|
||||||
p := s.blobPath(dgst)
|
|
||||||
fi, err := os.Stat(p)
|
|
||||||
if err != nil {
|
|
||||||
if os.IsNotExist(err) {
|
|
||||||
err = ErrNotFound
|
|
||||||
}
|
|
||||||
|
|
||||||
return Info{}, err
|
|
||||||
}
|
|
||||||
|
|
||||||
return s.info(dgst, fi), nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (s *store) info(dgst digest.Digest, fi os.FileInfo) Info {
|
|
||||||
return Info{
|
|
||||||
Digest: dgst,
|
|
||||||
Size: fi.Size(),
|
|
||||||
CommittedAt: fi.ModTime(),
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Reader returns an io.ReadCloser for the blob.
|
|
||||||
func (s *store) Reader(ctx context.Context, dgst digest.Digest) (io.ReadCloser, error) {
|
|
||||||
fp, err := os.Open(s.blobPath(dgst))
|
|
||||||
if err != nil {
|
|
||||||
if os.IsNotExist(err) {
|
|
||||||
err = ErrNotFound
|
|
||||||
}
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
return fp, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// ReaderAt returns an io.ReaderAt for the blob.
|
|
||||||
func (s *store) ReaderAt(ctx context.Context, dgst digest.Digest) (io.ReaderAt, error) {
|
|
||||||
return readerAt{f: s.blobPath(dgst)}, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// Delete removes a blob by its digest.
|
|
||||||
//
|
|
||||||
// While this is safe to do concurrently, safe exist-removal logic must hold
|
|
||||||
// some global lock on the store.
|
|
||||||
func (cs *store) Delete(ctx context.Context, dgst digest.Digest) error {
|
|
||||||
if err := os.RemoveAll(cs.blobPath(dgst)); err != nil {
|
|
||||||
if !os.IsNotExist(err) {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
return ErrNotFound
|
|
||||||
}
|
|
||||||
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// TODO(stevvooe): Allow querying the set of blobs in the blob store.
|
|
||||||
|
|
||||||
func (cs *store) Walk(ctx context.Context, fn WalkFunc) error {
|
|
||||||
root := filepath.Join(cs.root, "blobs")
|
|
||||||
var alg digest.Algorithm
|
|
||||||
return filepath.Walk(root, func(path string, fi os.FileInfo, err error) error {
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
if !fi.IsDir() && !alg.Available() {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// TODO(stevvooe): There are few more cases with subdirs that should be
|
|
||||||
// handled in case the layout gets corrupted. This isn't strict enough
|
|
||||||
// an may spew bad data.
|
|
||||||
|
|
||||||
if path == root {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
if filepath.Dir(path) == root {
|
|
||||||
alg = digest.Algorithm(filepath.Base(path))
|
|
||||||
|
|
||||||
if !alg.Available() {
|
|
||||||
alg = ""
|
|
||||||
return filepath.SkipDir
|
|
||||||
}
|
|
||||||
|
|
||||||
// descending into a hash directory
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
dgst := digest.NewDigestFromHex(alg.String(), filepath.Base(path))
|
|
||||||
if err := dgst.Validate(); err != nil {
|
|
||||||
// log error but don't report
|
|
||||||
log.L.WithError(err).WithField("path", path).Error("invalid digest for blob path")
|
|
||||||
// if we see this, it could mean some sort of corruption of the
|
|
||||||
// store or extra paths not expected previously.
|
|
||||||
}
|
|
||||||
|
|
||||||
return fn(cs.info(dgst, fi))
|
|
||||||
})
|
|
||||||
}
|
|
||||||
|
|
||||||
func (s *store) Status(ctx context.Context, re string) ([]Status, error) {
|
|
||||||
fp, err := os.Open(filepath.Join(s.root, "ingest"))
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
defer fp.Close()
|
|
||||||
|
|
||||||
fis, err := fp.Readdir(-1)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
rec, err := regexp.Compile(re)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
var active []Status
|
|
||||||
for _, fi := range fis {
|
|
||||||
p := filepath.Join(s.root, "ingest", fi.Name())
|
|
||||||
stat, err := s.status(p)
|
|
||||||
if err != nil {
|
|
||||||
if !os.IsNotExist(err) {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
// TODO(stevvooe): This is a common error if uploads are being
|
|
||||||
// completed while making this listing. Need to consider taking a
|
|
||||||
// lock on the whole store to coordinate this aspect.
        //
        // Another option is to cleanup downloads asynchronously and
        // coordinate this method with the cleanup process.
        //
        // For now, we just skip them, as they really don't exist.
            continue
        }

        if !rec.MatchString(stat.Ref) {
            continue
        }

        active = append(active, stat)
    }

    return active, nil
}

// status works like stat above except uses the path to the ingest.
func (s *store) status(ingestPath string) (Status, error) {
    dp := filepath.Join(ingestPath, "data")
    fi, err := os.Stat(dp)
    if err != nil {
        return Status{}, err
    }

    ref, err := readFileString(filepath.Join(ingestPath, "ref"))
    if err != nil {
        return Status{}, err
    }

    return Status{
        Ref:       ref,
        Offset:    fi.Size(),
        Total:     s.total(ingestPath),
        UpdatedAt: fi.ModTime(),
        StartedAt: getStartTime(fi),
    }, nil
}

// total attempts to resolve the total expected size for the write.
func (s *store) total(ingestPath string) int64 {
    totalS, err := readFileString(filepath.Join(ingestPath, "total"))
    if err != nil {
        return 0
    }

    total, err := strconv.ParseInt(totalS, 10, 64)
    if err != nil {
        // represents a corrupted file, should probably remove.
        return 0
    }

    return total
}

// Writer begins or resumes the active writer identified by ref. If the writer
// is already in use, an error is returned. Only one writer may be in use per
// ref at a time.
//
// The argument `ref` is used to uniquely identify a long-lived writer transaction.
func (s *store) Writer(ctx context.Context, ref string, total int64, expected digest.Digest) (Writer, error) {
    // TODO(stevvooe): Need to actually store and handle expected here. We have
    // code in the service that shouldn't be dealing with this.

    path, refp, data := s.ingestPaths(ref)

    if err := tryLock(ref); err != nil {
        return nil, errors.Wrapf(err, "locking %v failed", ref)
    }

    var (
        digester  = digest.Canonical.Digester()
        offset    int64
        startedAt time.Time
        updatedAt time.Time
    )

    // ensure that the ingest path has been created.
    if err := os.Mkdir(path, 0755); err != nil {
        if !os.IsExist(err) {
            return nil, err
        }

        status, err := s.status(path)
        if err != nil {
            return nil, errors.Wrap(err, "failed reading status of resume write")
        }

        if ref != status.Ref {
            // NOTE(stevvooe): This is fairly catastrophic. Either we have some
            // layout corruption or a hash collision for the ref key.
            return nil, errors.Wrapf(err, "ref key does not match: %v != %v", ref, status.Ref)
        }

        if total > 0 && status.Total > 0 && total != status.Total {
            return nil, errors.Errorf("provided total differs from status: %v != %v", total, status.Total)
        }

        // slow slow slow!!, send to goroutine or use resumable hashes
        fp, err := os.Open(data)
        if err != nil {
            return nil, err
        }
        defer fp.Close()

        p := bufPool.Get().([]byte)
        defer bufPool.Put(p)

        offset, err = io.CopyBuffer(digester.Hash(), fp, p)
        if err != nil {
            return nil, err
        }

        updatedAt = status.UpdatedAt
        startedAt = status.StartedAt
        total = status.Total
    } else {
        // the ingest is new, we need to setup the target location.
        // write the ref to a file for later use
        if err := ioutil.WriteFile(refp, []byte(ref), 0666); err != nil {
            return nil, err
        }

        if total > 0 {
            if err := ioutil.WriteFile(filepath.Join(path, "total"), []byte(fmt.Sprint(total)), 0666); err != nil {
                return nil, err
            }
        }

        startedAt = time.Now()
        updatedAt = startedAt
    }

    fp, err := os.OpenFile(data, os.O_WRONLY|os.O_CREATE, 0666)
    if err != nil {
        return nil, errors.Wrap(err, "failed to open data file")
    }

    return &writer{
        s:         s,
        fp:        fp,
        ref:       ref,
        path:      path,
        offset:    offset,
        total:     total,
        digester:  digester,
        startedAt: startedAt,
        updatedAt: updatedAt,
    }, nil
}

// Abort an active transaction keyed by ref. If the ingest is active, it will
// be cancelled. Any resources associated with the ingest will be cleaned.
func (s *store) Abort(ctx context.Context, ref string) error {
    root := s.ingestRoot(ref)
    if err := os.RemoveAll(root); err != nil {
        if os.IsNotExist(err) {
            return ErrNotFound
        }

        return err
    }

    return nil
}

func (cs *store) blobPath(dgst digest.Digest) string {
    return filepath.Join(cs.root, "blobs", dgst.Algorithm().String(), dgst.Hex())
}

func (s *store) ingestRoot(ref string) string {
    dgst := digest.FromString(ref)
    return filepath.Join(s.root, "ingest", dgst.Hex())
}

// ingestPaths are returned. The paths are the following:
//
// - root: entire ingest directory
// - ref: name of the starting ref, must be unique
// - data: file where data is written
//
func (s *store) ingestPaths(ref string) (string, string, string) {
    var (
        fp = s.ingestRoot(ref)
        rp = filepath.Join(fp, "ref")
        dp = filepath.Join(fp, "data")
    )

    return fp, rp, dp
}
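A short sketch of the on-disk ingest layout implied by ingestRoot and ingestPaths above: the ref is hashed with the canonical digest algorithm so arbitrary ref strings become stable, filesystem-safe directory names. This is illustrative only and not part of the vendored code; the root path is an assumption.

package main

import (
    "fmt"
    "path/filepath"

    "github.com/opencontainers/go-digest"
)

func main() {
    ref := "layer-sha256:abcd" // illustrative ref
    root := "/var/lib/content" // assumed store root, not from the vendored code

    // ingestRoot: hash the ref so it can be used as a directory name.
    dgst := digest.FromString(ref)
    ingestRoot := filepath.Join(root, "ingest", dgst.Hex())

    fmt.Println(ingestRoot)                         // entire ingest directory
    fmt.Println(filepath.Join(ingestRoot, "ref"))   // holds the original ref string
    fmt.Println(filepath.Join(ingestRoot, "data"))  // file where data is written
    fmt.Println(filepath.Join(ingestRoot, "total")) // expected size, when known
}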
15 vendor/github.com/containerd/containerd/content/store_linux.go generated vendored
@ -1,15 +0,0 @@
package content

import (
    "os"
    "syscall"
    "time"
)

func getStartTime(fi os.FileInfo) time.Time {
    if st, ok := fi.Sys().(*syscall.Stat_t); ok {
        return time.Unix(int64(st.Ctim.Sec), int64(st.Ctim.Nsec))
    }

    return fi.ModTime()
}
17 vendor/github.com/containerd/containerd/content/store_unix.go generated vendored
@ -1,17 +0,0 @@
// +build darwin freebsd

package content

import (
    "os"
    "syscall"
    "time"
)

func getStartTime(fi os.FileInfo) time.Time {
    if st, ok := fi.Sys().(*syscall.Stat_t); ok {
        return time.Unix(int64(st.Ctimespec.Sec), int64(st.Ctimespec.Nsec))
    }

    return fi.ModTime()
}
10 vendor/github.com/containerd/containerd/content/store_windows.go generated vendored
@ -1,10 +0,0 @@
package content

import (
    "os"
    "time"
)

func getStartTime(fi os.FileInfo) time.Time {
    return fi.ModTime()
}
140 vendor/github.com/containerd/containerd/content/writer.go generated vendored
@ -1,140 +0,0 @@
package content

import (
    "os"
    "path/filepath"
    "time"

    "github.com/opencontainers/go-digest"
    "github.com/pkg/errors"
)

// writer represents a write transaction against the blob store.
type writer struct {
    s         *store
    fp        *os.File // opened data file
    path      string   // path to writer dir
    ref       string   // ref key
    offset    int64
    total     int64
    digester  digest.Digester
    startedAt time.Time
    updatedAt time.Time
}

func (w *writer) Status() (Status, error) {
    return Status{
        Ref:       w.ref,
        Offset:    w.offset,
        Total:     w.total,
        StartedAt: w.startedAt,
        UpdatedAt: w.updatedAt,
    }, nil
}

// Digest returns the current digest of the content, up to the current write.
//
// Cannot be called concurrently with `Write`.
func (w *writer) Digest() digest.Digest {
    return w.digester.Digest()
}

// Write p to the transaction.
//
// Note that writes are unbuffered to the backing file. When writing, it is
// recommended to wrap in a bufio.Writer or, preferably, use io.CopyBuffer.
func (w *writer) Write(p []byte) (n int, err error) {
    n, err = w.fp.Write(p)
    w.digester.Hash().Write(p[:n])
    w.offset += int64(len(p))
    w.updatedAt = time.Now()
    return n, err
}

func (w *writer) Commit(size int64, expected digest.Digest) error {
    if err := w.fp.Sync(); err != nil {
        return errors.Wrap(err, "sync failed")
    }

    fi, err := w.fp.Stat()
    if err != nil {
        return errors.Wrap(err, "stat on ingest file failed")
    }

    // change to readonly, more important for read, but provides _some_
    // protection from this point on. We use the existing perms with a mask
    // only allowing reads honoring the umask on creation.
    //
    // This removes write and exec, only allowing read per the creation umask.
    if err := w.fp.Chmod((fi.Mode() & os.ModePerm) &^ 0333); err != nil {
        return errors.Wrap(err, "failed to change ingest file permissions")
    }

    if size > 0 && size != fi.Size() {
        return errors.Errorf("%q failed size validation: %v != %v", w.ref, fi.Size(), size)
    }

    if err := w.fp.Close(); err != nil {
        return errors.Wrap(err, "failed closing ingest")
    }

    dgst := w.digester.Digest()
    if expected != "" && expected != dgst {
        return errors.Errorf("unexpected digest: %v != %v", dgst, expected)
    }

    var (
        ingest = filepath.Join(w.path, "data")
        target = w.s.blobPath(dgst)
    )

    // make sure parent directories of blob exist
    if err := os.MkdirAll(filepath.Dir(target), 0755); err != nil {
        return err
    }

    // clean up!!
    defer os.RemoveAll(w.path)

    if err := os.Rename(ingest, target); err != nil {
        if os.IsExist(err) {
            // collision with the target file!
            return ErrExists
        }
        return err
    }

    unlock(w.ref)
    w.fp = nil

    return nil
}

// Close the writer, flushing any unwritten data and leaving the progress
// intact.
//
// If one needs to resume the transaction, a new writer can be obtained from
// `ContentStore.Resume` using the same key. The write can then be continued
// from where it was left off.
//
// To abandon a transaction completely, first call close then `Store.Remove` to
// clean up the associated resources.
func (cw *writer) Close() (err error) {
    unlock(cw.ref)

    if cw.fp != nil {
        cw.fp.Sync()
        return cw.fp.Close()
    }

    return nil
}

func (w *writer) Truncate(size int64) error {
    if size != 0 {
        return errors.New("Truncate: unsupported size")
    }
    w.offset = 0
    w.digester.Hash().Reset()
    return w.fp.Truncate(0)
}
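Since Write above is unbuffered against the backing file and its doc comment recommends io.CopyBuffer, a typical caller streams data in and lets Commit enforce the size and digest checks. The following is a minimal usage sketch, not vendored code; the blobWriter interface is defined locally so the sketch does not assume the exact exported interface name.

package main

import (
    "io"

    "github.com/opencontainers/go-digest"
)

// blobWriter mirrors the methods of the writer shown above (Write, Close,
// Commit); it is a local stand-in for illustration only.
type blobWriter interface {
    io.WriteCloser
    Commit(size int64, expected digest.Digest) error
}

// ingestBlob streams r into w with a buffered copy, then commits so that the
// size and digest validation implemented above can run. On copy failure it
// closes the writer, which keeps the partial progress for a later resume.
func ingestBlob(w blobWriter, r io.Reader, size int64, expected digest.Digest) error {
    buf := make([]byte, 32*1024)
    if _, err := io.CopyBuffer(w, r, buf); err != nil {
        w.Close()
        return err
    }
    return w.Commit(size, expected)
}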
54 vendor/github.com/containerd/containerd/errdefs/errors.go generated vendored Normal file
@ -0,0 +1,54 @@
// Package errdefs defines the common errors used throughout containerd
// packages.
//
// Use with errors.Wrap and errors.Wrapf to add context to an error.
//
// To detect an error class, use the IsXXX functions to tell whether an error
// is of a certain type.
//
// The functions ToGRPC and FromGRPC can be used to map server-side and
// client-side errors to the correct types.
package errdefs

import "github.com/pkg/errors"

// Definitions of common error types used throughout containerd. All containerd
// errors returned by most packages will map into one of these error classes.
// Packages should return errors of these types when they want to instruct a
// client to take a particular action.
//
// For the most part, we just try to provide local grpc errors. Most conditions
// map very well to those defined by grpc.
var (
    ErrUnknown            = errors.New("unknown") // used internally to represent a missed mapping.
    ErrInvalidArgument    = errors.New("invalid argument")
    ErrNotFound           = errors.New("not found")
    ErrAlreadyExists      = errors.New("already exists")
    ErrFailedPrecondition = errors.New("failed precondition")
    ErrUnavailable        = errors.New("unavailable")
)

func IsInvalidArgument(err error) bool {
    return errors.Cause(err) == ErrInvalidArgument
}

// IsNotFound returns true if the error is due to a missing object
func IsNotFound(err error) bool {
    return errors.Cause(err) == ErrNotFound
}

// IsAlreadyExists returns true if the error is due to an already existing
// metadata item
func IsAlreadyExists(err error) bool {
    return errors.Cause(err) == ErrAlreadyExists
}

// IsFailedPrecondition returns true if an operation could not proceed due to
// the lack of a particular condition.
func IsFailedPrecondition(err error) bool {
    return errors.Cause(err) == ErrFailedPrecondition
}

func IsUnavailable(err error) bool {
    return errors.Cause(err) == ErrUnavailable
}
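The package comment above describes the intended usage pattern: producers wrap one of the error classes with context, and consumers test the class with the IsXXX helpers, which unwrap through errors.Cause. A small sketch, with an illustrative function name that is not part of containerd:

package main

import (
    "fmt"

    "github.com/containerd/containerd/errdefs"
    "github.com/pkg/errors"
)

// lookupImage is illustrative only: it returns ErrNotFound wrapped with
// context, as the errdefs package comment suggests.
func lookupImage(name string) error {
    return errors.Wrapf(errdefs.ErrNotFound, "image %q", name)
}

func main() {
    err := lookupImage("docker.io/library/redis:latest")

    // IsNotFound unwraps with errors.Cause, so the wrapped class is still detected.
    fmt.Println(errdefs.IsNotFound(err)) // true
    fmt.Println(err)                     // image "docker.io/library/redis:latest": not found
}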
106 vendor/github.com/containerd/containerd/errdefs/grpc.go generated vendored Normal file
@ -0,0 +1,106 @@
package errdefs

import (
    "strings"

    "github.com/pkg/errors"
    "google.golang.org/grpc"
    "google.golang.org/grpc/codes"
    "google.golang.org/grpc/status"
)

// ToGRPC will attempt to map the backend containerd error into a grpc error,
// using the original error message as a description.
//
// Further information may be extracted from certain errors depending on their
// type.
//
// If the error is unmapped, the original error will be returned to be handled
// by the regular grpc error handling stack.
func ToGRPC(err error) error {
    if err == nil {
        return nil
    }

    if isGRPCError(err) {
        // error has already been mapped to grpc
        return err
    }

    switch {
    case IsInvalidArgument(err):
        return grpc.Errorf(codes.InvalidArgument, err.Error())
    case IsNotFound(err):
        return grpc.Errorf(codes.NotFound, err.Error())
    case IsAlreadyExists(err):
        return grpc.Errorf(codes.AlreadyExists, err.Error())
    case IsFailedPrecondition(err):
        return grpc.Errorf(codes.FailedPrecondition, err.Error())
    case IsUnavailable(err):
        return grpc.Errorf(codes.Unavailable, err.Error())
    }

    return err
}

// ToGRPCf maps the error to grpc error codes, assembling the formatting string
// and combining it with the target error string.
//
// This is equivalent to errors.ToGRPC(errors.Wrapf(err, format, args...))
func ToGRPCf(err error, format string, args ...interface{}) error {
    return ToGRPC(errors.Wrapf(err, format, args...))
}

func FromGRPC(err error) error {
    if err == nil {
        return nil
    }

    var cls error // divide these into error classes, becomes the cause

    switch grpc.Code(err) {
    case codes.InvalidArgument:
        cls = ErrInvalidArgument
    case codes.AlreadyExists:
        cls = ErrAlreadyExists
    case codes.NotFound:
        cls = ErrNotFound
    case codes.Unavailable:
        cls = ErrUnavailable
    case codes.FailedPrecondition:
        cls = ErrFailedPrecondition
    default:
        cls = ErrUnknown
    }

    if cls != nil {
        msg := rebaseMessage(cls, err)
        if msg != "" {
            err = errors.Wrapf(cls, msg)
        } else {
            err = cls
        }
    }

    return err
}

// rebaseMessage removes the repeats for an error at the end of an error
// string. This will happen when taking an error over grpc then remapping it.
//
// Effectively, we just remove the string of cls from the end of err if it
// appears there.
func rebaseMessage(cls error, err error) string {
    desc := grpc.ErrorDesc(err)
    clss := cls.Error()
    if desc == clss {
        return ""
    }

    return strings.TrimSuffix(desc, ": "+clss)
}

func isGRPCError(err error) bool {
    _, ok := status.FromError(err)
    return ok
}
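ToGRPC and FromGRPC above are designed to round-trip: the server side turns an error class into the corresponding grpc status code, and the client side maps the code back so the IsXXX helpers keep working across the wire. A minimal sketch of that round trip, based only on the functions shown above:

package main

import (
    "fmt"

    "github.com/containerd/containerd/errdefs"
    "github.com/pkg/errors"
)

func main() {
    // Server side: a classed error becomes a grpc error with codes.NotFound.
    srvErr := errdefs.ToGRPC(errors.Wrap(errdefs.ErrNotFound, "container abc"))

    // Client side: the grpc code is mapped back onto the same error class,
    // with the repeated class suffix stripped by rebaseMessage.
    cliErr := errdefs.FromGRPC(srvErr)

    fmt.Println(errdefs.IsNotFound(cliErr)) // true
}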
24 vendor/github.com/containerd/containerd/events/events.go generated vendored Normal file
@ -0,0 +1,24 @@
package events

import (
    "context"

    events "github.com/containerd/containerd/api/services/events/v1"
)

type Event interface{}

// Publisher posts the event.
type Publisher interface {
    Publish(ctx context.Context, topic string, event Event) error
}

type Forwarder interface {
    Forward(ctx context.Context, envelope *events.Envelope) error
}

type publisherFunc func(ctx context.Context, topic string, event Event) error

func (fn publisherFunc) Publish(ctx context.Context, topic string, event Event) error {
    return fn(ctx, topic, event)
}
163 vendor/github.com/containerd/containerd/events/exchange.go generated vendored Normal file
@ -0,0 +1,163 @@
package events

import (
    "context"
    "strings"
    "time"

    events "github.com/containerd/containerd/api/services/events/v1"
    "github.com/containerd/containerd/errdefs"
    "github.com/containerd/containerd/filters"
    "github.com/containerd/containerd/identifiers"
    "github.com/containerd/containerd/log"
    "github.com/containerd/containerd/namespaces"
    "github.com/containerd/containerd/typeurl"
    goevents "github.com/docker/go-events"
    "github.com/pkg/errors"
    "github.com/sirupsen/logrus"
)

type Exchange struct {
    broadcaster *goevents.Broadcaster
}

func NewExchange() *Exchange {
    return &Exchange{
        broadcaster: goevents.NewBroadcaster(),
    }
}

// Forward accepts an envelope to be directly distributed on the exchange.
func (e *Exchange) Forward(ctx context.Context, envelope *events.Envelope) error {
    log.G(ctx).WithFields(logrus.Fields{
        "topic": envelope.Topic,
        "ns":    envelope.Namespace,
        "type":  envelope.Event.TypeUrl,
    }).Debug("forward event")

    if err := namespaces.Validate(envelope.Namespace); err != nil {
        return errors.Wrapf(err, "event envelope has invalid namespace")
    }

    if err := validateTopic(envelope.Topic); err != nil {
        return errors.Wrapf(err, "envelope topic %q", envelope.Topic)
    }

    return e.broadcaster.Write(envelope)
}

// Publish packages and sends an event. The caller will be considered the
// initial publisher of the event. This means the timestamp will be calculated
// at this point and this method may read from the calling context.
func (e *Exchange) Publish(ctx context.Context, topic string, event Event) error {
    namespace, err := namespaces.NamespaceRequired(ctx)
    if err != nil {
        return errors.Wrapf(err, "failed publishing event")
    }
    if err := validateTopic(topic); err != nil {
        return errors.Wrapf(err, "envelope topic %q", topic)
    }

    evany, err := typeurl.MarshalAny(event)
    if err != nil {
        return err
    }
    env := events.Envelope{
        Timestamp: time.Now().UTC(),
        Topic:     topic,
        Event:     evany,
    }
    if err := e.broadcaster.Write(&env); err != nil {
        return err
    }

    log.G(ctx).WithFields(logrus.Fields{
        "topic": topic,
        "type":  evany.TypeUrl,
        "ns":    namespace,
    }).Debug("published event")
    return nil
}

// Subscribe to events on the exchange. Events are sent through the returned
// channel ch. If an error is encountered, it will be sent on channel errs and
// errs will be closed. To end the subscription, cancel the provided context.
func (e *Exchange) Subscribe(ctx context.Context, filters ...filters.Filter) (ch <-chan *events.Envelope, errs <-chan error) {
    var (
        evch    = make(chan *events.Envelope)
        errq    = make(chan error, 1)
        channel = goevents.NewChannel(0)
        queue   = goevents.NewQueue(channel)
    )

    // TODO(stevvooe): Insert the filter!

    e.broadcaster.Add(queue)

    go func() {
        defer close(errq)
        defer e.broadcaster.Remove(queue)
        defer queue.Close()
        defer channel.Close()

        var err error
    loop:
        for {
            select {
            case ev := <-channel.C:
                env, ok := ev.(*events.Envelope)
                if !ok {
                    // TODO(stevvooe): For the most part, we are well protected
                    // from this condition. Both Forward and Publish protect
                    // from this.
                    err = errors.Errorf("invalid envelope encountered %#v; please file a bug", ev)
                    break
                }

                select {
                case evch <- env:
                case <-ctx.Done():
                    break loop
                }
            case <-ctx.Done():
                break loop
            }
        }

        if err == nil {
            if cerr := ctx.Err(); cerr != context.Canceled {
                err = cerr
            }
        }

        errq <- err
    }()

    ch = evch
    errs = errq

    return
}

func validateTopic(topic string) error {
    if topic == "" {
        return errors.Wrap(errdefs.ErrInvalidArgument, "must not be empty")
    }

    if topic[0] != '/' {
        return errors.Wrapf(errdefs.ErrInvalidArgument, "must start with '/'", topic)
    }

    if len(topic) == 1 {
        return errors.Wrapf(errdefs.ErrInvalidArgument, "must have at least one component", topic)
    }

    components := strings.Split(topic[1:], "/")
    for _, component := range components {
        if err := identifiers.Validate(component); err != nil {
            return errors.Wrapf(err, "failed validation on component %q", component)
        }
    }

    return nil
}
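Publish above requires a namespace on the calling context and marshals the event with the typeurl package, while Subscribe delivers envelopes until the context is cancelled. The sketch below shows that flow; it is not vendored code, and it assumes the concrete event type has been registered with containerd's typeurl package so MarshalAny can encode it.

package main

import (
    "context"
    "fmt"
    "log"

    "github.com/containerd/containerd/events"
    "github.com/containerd/containerd/namespaces"
)

// myEvent is illustrative only; it must be registered with typeurl for
// Publish to succeed.
type myEvent struct {
    ID string
}

func main() {
    exchange := events.NewExchange()

    // Publish reads the namespace from the context, so it must be set.
    ctx, cancel := context.WithCancel(namespaces.WithNamespace(context.Background(), "default"))
    defer cancel()

    ch, errs := exchange.Subscribe(ctx)

    if err := exchange.Publish(ctx, "/tasks/create", &myEvent{ID: "task-1"}); err != nil {
        log.Fatal(err) // e.g. the event type was not registered with typeurl
    }

    select {
    case env := <-ch:
        fmt.Println("received", env.Topic)
    case err := <-errs:
        fmt.Println("subscription ended:", err)
    }
}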
65 vendor/github.com/containerd/containerd/export.go generated vendored Normal file
@ -0,0 +1,65 @@
package containerd

import (
    "archive/tar"
    "context"
    "io"

    "github.com/containerd/containerd/content"
    "github.com/containerd/containerd/images"
    "github.com/containerd/containerd/oci"
    ocispecs "github.com/opencontainers/image-spec/specs-go"
    ocispec "github.com/opencontainers/image-spec/specs-go/v1"
)

func (c *Client) exportToOCITar(ctx context.Context, desc ocispec.Descriptor, writer io.Writer, eopts exportOpts) error {
    tw := tar.NewWriter(writer)
    img := oci.Tar(tw)

    // For tar, we defer creating index until end of the function.
    if err := oci.Init(img, oci.InitOpts{SkipCreateIndex: true}); err != nil {
        return err
    }
    cs := c.ContentStore()
    handlers := images.Handlers(
        images.ChildrenHandler(cs),
        exportHandler(cs, img),
    )
    // For tar, we need to use Walk instead of Dispatch for ensuring sequential write
    if err := images.Walk(ctx, handlers, desc); err != nil {
        return err
    }
    // For tar, we don't use oci.PutManifestDescriptorToIndex() which allows appending desc to existing index.json
    // but requires img to support random read access so as to read index.json.
    return oci.WriteIndex(img,
        ocispec.Index{
            Versioned: ocispecs.Versioned{
                SchemaVersion: 2,
            },
            Manifests: []ocispec.Descriptor{desc},
        },
    )
}

func exportHandler(cs content.Store, img oci.ImageDriver) images.HandlerFunc {
    return func(ctx context.Context, desc ocispec.Descriptor) ([]ocispec.Descriptor, error) {
        r, err := cs.Reader(ctx, desc.Digest)
        if err != nil {
            return nil, err
        }
        w, err := oci.NewBlobWriter(img, desc.Digest.Algorithm())
        if err != nil {
            return nil, err
        }
        if _, err = io.Copy(w, r); err != nil {
            return nil, err
        }
        if err = w.Commit(desc.Size, desc.Digest); err != nil {
            return nil, err
        }
        if err = w.Close(); err != nil {
            return nil, err
        }
        return nil, nil
    }
}
15 vendor/github.com/containerd/containerd/filters/adaptor.go generated vendored Normal file
@ -0,0 +1,15 @@
package filters

// Adaptor specifies the mapping of fieldpaths to a type. For the given field
// path, the value and whether it is present should be returned. The mapping of
// the fieldpath to a field is deferred to the adaptor implementation, but
// should generally follow protobuf field path/mask semantics.
type Adaptor interface {
    Field(fieldpath []string) (value string, present bool)
}

type AdapterFunc func(fieldpath []string) (string, bool)

func (fn AdapterFunc) Field(fieldpath []string) (string, bool) {
    return fn(fieldpath)
}
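The Adaptor interface above is how arbitrary types are exposed to the filter syntax documented in the filters package: each fieldpath segment is resolved by the adaptor. A minimal sketch, using an illustrative record type that is not part of containerd:

package main

import (
    "fmt"

    "github.com/containerd/containerd/filters"
)

// task is an illustrative record, not a containerd type.
type task struct {
    Name   string
    Labels map[string]string
}

// adapt exposes the record through AdapterFunc, resolving fieldpaths such as
// "name" and "labels.<key>" in the protobuf field path style described above.
func adapt(t task) filters.Adaptor {
    return filters.AdapterFunc(func(fieldpath []string) (string, bool) {
        if len(fieldpath) == 0 {
            return "", false
        }
        switch fieldpath[0] {
        case "name":
            return t.Name, len(t.Name) > 0
        case "labels":
            if len(fieldpath) < 2 {
                return "", false
            }
            v, ok := t.Labels[fieldpath[1]]
            return v, ok
        }
        return "", false
    })
}

func main() {
    t := task{Name: "redis", Labels: map[string]string{"env": "prod"}}
    fmt.Println(adapt(t).Field([]string{"labels", "env"})) // prod true
}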
155
vendor/github.com/containerd/containerd/filters/filter.go
generated
vendored
Normal file
155
vendor/github.com/containerd/containerd/filters/filter.go
generated
vendored
Normal file
@ -0,0 +1,155 @@
|
|||||||
|
// Package filters defines a syntax and parser that can be used for the
|
||||||
|
// filtration of items across the containerd API. The core is built on the
|
||||||
|
// concept of protobuf field paths, with quoting. Several operators allow the
|
||||||
|
// user to flexibly select items based on field presence, equality, inequality
|
||||||
|
// and regular expressions. Flexible adaptors support working with any type.
|
||||||
|
//
|
||||||
|
// The syntax is fairly familiar, if you've used container ecosystem
|
||||||
|
// projects. At the core, we base it on the concept of protobuf field
|
||||||
|
// paths, augmenting with the ability to quote portions of the field path
|
||||||
|
// to match arbitrary labels. These "selectors" come in the following
|
||||||
|
// syntax:
|
||||||
|
//
|
||||||
|
// ```
|
||||||
|
// <fieldpath>[<operator><value>]
|
||||||
|
// ```
|
||||||
|
//
|
||||||
|
// A basic example is as follows:
|
||||||
|
//
|
||||||
|
// ```
|
||||||
|
// name==foo
|
||||||
|
// ```
|
||||||
|
//
|
||||||
|
// This would match all objects that have a field `name` with the value
|
||||||
|
// `foo`. If we only want to test if the field is present, we can omit the
|
||||||
|
// operator. This is most useful for matching labels in containerd. The
|
||||||
|
// following will match objects that have the field "labels" and have the
|
||||||
|
// label "foo" defined:
|
||||||
|
//
|
||||||
|
// ```
|
||||||
|
// labels.foo
|
||||||
|
// ```
|
||||||
|
//
|
||||||
|
// We also allow for quoting of parts of the field path to allow matching
|
||||||
|
// of arbitrary items:
|
||||||
|
//
|
||||||
|
// ```
|
||||||
|
// labels."very complex label"==something
|
||||||
|
// ```
|
||||||
|
//
|
||||||
|
// We also define `!=` and `~=` as operators. The `!=` will match all
|
||||||
|
// objects that don't match the value for a field and `~=` will compile the
|
||||||
|
// target value as a regular expression and match the field value against that.
|
||||||
|
//
|
||||||
|
// Selectors can be combined using a comma, such that the resulting
|
||||||
|
// selector will require all selectors are matched for the object to match.
|
||||||
|
// The following example will match objects that are named `foo` and have
|
||||||
|
// the label `bar`:
|
||||||
|
//
|
||||||
|
// ```
|
||||||
|
// name==foo,labels.bar
|
||||||
|
// ```
|
||||||
|
//
|
||||||
|
package filters
|
||||||
|
|
||||||
|
import (
|
||||||
|
"regexp"
|
||||||
|
|
||||||
|
"github.com/containerd/containerd/log"
|
||||||
|
)
|
||||||
|
|
||||||
|
type Filter interface {
|
||||||
|
Match(adaptor Adaptor) bool
|
||||||
|
}
|
||||||
|
|
||||||
|
type FilterFunc func(Adaptor) bool
|
||||||
|
|
||||||
|
func (fn FilterFunc) Match(adaptor Adaptor) bool {
|
||||||
|
return fn(adaptor)
|
||||||
|
}
|
||||||
|
|
||||||
|
var Always FilterFunc = func(adaptor Adaptor) bool {
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
|
||||||
|
type Any []Filter
|
||||||
|
|
||||||
|
func (m Any) Match(adaptor Adaptor) bool {
|
||||||
|
for _, m := range m {
|
||||||
|
if m.Match(adaptor) {
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
|
||||||
|
type All []Filter
|
||||||
|
|
||||||
|
func (m All) Match(adaptor Adaptor) bool {
|
||||||
|
for _, m := range m {
|
||||||
|
if !m.Match(adaptor) {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
|
||||||
|
type operator int
|
||||||
|
|
||||||
|
const (
|
||||||
|
operatorPresent = iota
|
||||||
|
operatorEqual
|
||||||
|
operatorNotEqual
|
||||||
|
operatorMatches
|
||||||
|
)
|
||||||
|
|
||||||
|
func (op operator) String() string {
|
||||||
|
switch op {
|
||||||
|
case operatorPresent:
|
||||||
|
return "?"
|
||||||
|
case operatorEqual:
|
||||||
|
return "=="
|
||||||
|
case operatorNotEqual:
|
||||||
|
return "!="
|
||||||
|
case operatorMatches:
|
||||||
|
return "~="
|
||||||
|
}
|
||||||
|
|
||||||
|
return "unknown"
|
||||||
|
}
|
||||||
|
|
||||||
|
type selector struct {
|
||||||
|
fieldpath []string
|
||||||
|
operator operator
|
||||||
|
value string
|
||||||
|
re *regexp.Regexp
|
||||||
|
}
|
||||||
|
|
||||||
|
func (m selector) Match(adaptor Adaptor) bool {
|
||||||
|
value, present := adaptor.Field(m.fieldpath)
|
||||||
|
|
||||||
|
switch m.operator {
|
||||||
|
case operatorPresent:
|
||||||
|
return present
|
||||||
|
case operatorEqual:
|
||||||
|
return present && value == m.value
|
||||||
|
case operatorNotEqual:
|
||||||
|
return value != m.value
|
||||||
|
case operatorMatches:
|
||||||
|
if m.re == nil {
|
||||||
|
r, err := regexp.Compile(m.value)
|
||||||
|
if err != nil {
|
||||||
|
log.L.Errorf("error compiling regexp %q", m.value)
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
|
||||||
|
m.re = r
|
||||||
|
}
|
||||||
|
|
||||||
|
return m.re.MatchString(value)
|
||||||
|
default:
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
}
|
262
vendor/github.com/containerd/containerd/filters/parser.go
generated
vendored
Normal file
262
vendor/github.com/containerd/containerd/filters/parser.go
generated
vendored
Normal file
@ -0,0 +1,262 @@
|
|||||||
|
package filters
|
||||||
|
|
||||||
|
import (
|
||||||
|
"fmt"
|
||||||
|
"io"
|
||||||
|
"strconv"
|
||||||
|
|
||||||
|
"github.com/containerd/containerd/errdefs"
|
||||||
|
"github.com/pkg/errors"
|
||||||
|
)
|
||||||
|
|
||||||
|
/*
|
||||||
|
Parse the strings into a filter that may be used with an adaptor.
|
||||||
|
|
||||||
|
The filter is made up of zero or more selectors.
|
||||||
|
|
||||||
|
The format is a comma separated list of expressions, in the form of
|
||||||
|
`<fieldpath><op><value>`, known as selectors. All selectors must match the
|
||||||
|
target object for the filter to be true.
|
||||||
|
|
||||||
|
We define the operators "==" for equality, "!=" for not equal and "~=" for a
|
||||||
|
regular expression. If the operator and value are not present, the matcher will
|
||||||
|
test for the presence of a value, as defined by the target object.
|
||||||
|
|
||||||
|
The formal grammar is as follows:
|
||||||
|
|
||||||
|
selectors := selector ("," selector)*
|
||||||
|
selector := fieldpath (operator value)
|
||||||
|
fieldpath := field ('.' field)*
|
||||||
|
field := quoted | [A-Za-z] [A-Za-z0-9_]+
|
||||||
|
operator := "==" | "!=" | "~="
|
||||||
|
value := quoted | [^\s,]+
|
||||||
|
quoted := <go string syntax>
|
||||||
|
|
||||||
|
*/
|
||||||
|
func Parse(s string) (Filter, error) {
|
||||||
|
// special case empty to match all
|
||||||
|
if s == "" {
|
||||||
|
return Always, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
p := parser{input: s}
|
||||||
|
return p.parse()
|
||||||
|
}
|
||||||
|
|
||||||
|
// ParseAll parses each filter in ss and returns a filter that will return true
|
||||||
|
// if any filter matches the expression.
|
||||||
|
//
|
||||||
|
// If no filters are provided, the filter will match anything.
|
||||||
|
func ParseAll(ss ...string) (Filter, error) {
|
||||||
|
if len(ss) == 0 {
|
||||||
|
return Always, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
var fs []Filter
|
||||||
|
for _, s := range ss {
|
||||||
|
f, err := Parse(s)
|
||||||
|
if err != nil {
|
||||||
|
return nil, errors.Wrapf(errdefs.ErrInvalidArgument, err.Error())
|
||||||
|
}
|
||||||
|
|
||||||
|
fs = append(fs, f)
|
||||||
|
}
|
||||||
|
|
||||||
|
return Any(fs), nil
|
||||||
|
}
|
||||||
|
|
||||||
|
type parser struct {
|
||||||
|
input string
|
||||||
|
scanner scanner
|
||||||
|
}
|
||||||
|
|
||||||
|
func (p *parser) parse() (Filter, error) {
|
||||||
|
p.scanner.init(p.input)
|
||||||
|
|
||||||
|
ss, err := p.selectors()
|
||||||
|
if err != nil {
|
||||||
|
return nil, errors.Wrap(err, "filters")
|
||||||
|
}
|
||||||
|
|
||||||
|
return ss, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (p *parser) selectors() (Filter, error) {
|
||||||
|
s, err := p.selector()
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
ss := All{s}
|
||||||
|
|
||||||
|
loop:
|
||||||
|
for {
|
||||||
|
tok := p.scanner.peek()
|
||||||
|
switch tok {
|
||||||
|
case ',':
|
||||||
|
pos, tok, _ := p.scanner.scan()
|
||||||
|
if tok != tokenSeparator {
|
||||||
|
return nil, p.mkerr(pos, "expected a separator")
|
||||||
|
}
|
||||||
|
|
||||||
|
s, err := p.selector()
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
ss = append(ss, s)
|
||||||
|
case tokenEOF:
|
||||||
|
break loop
|
||||||
|
default:
|
||||||
|
return nil, p.mkerr(p.scanner.ppos, "unexpected input: %v", string(tok))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return ss, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (p *parser) selector() (selector, error) {
|
||||||
|
fieldpath, err := p.fieldpath()
|
||||||
|
if err != nil {
|
||||||
|
return selector{}, err
|
||||||
|
}
|
||||||
|
|
||||||
|
switch p.scanner.peek() {
|
||||||
|
case ',', tokenSeparator, tokenEOF:
|
||||||
|
return selector{
|
||||||
|
fieldpath: fieldpath,
|
||||||
|
operator: operatorPresent,
|
||||||
|
}, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
op, err := p.operator()
|
||||||
|
if err != nil {
|
||||||
|
return selector{}, err
|
||||||
|
}
|
||||||
|
|
||||||
|
value, err := p.value()
|
||||||
|
if err != nil {
|
||||||
|
if err == io.EOF {
|
||||||
|
return selector{}, io.ErrUnexpectedEOF
|
||||||
|
}
|
||||||
|
return selector{}, err
|
||||||
|
}
|
||||||
|
|
||||||
|
return selector{
|
||||||
|
fieldpath: fieldpath,
|
||||||
|
value: value,
|
||||||
|
operator: op,
|
||||||
|
}, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (p *parser) fieldpath() ([]string, error) {
|
||||||
|
f, err := p.field()
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
fs := []string{f}
|
||||||
|
loop:
|
||||||
|
for {
|
||||||
|
tok := p.scanner.peek() // lookahead to consume field separator
|
||||||
|
|
||||||
|
switch tok {
|
||||||
|
case '.':
|
||||||
|
pos, tok, _ := p.scanner.scan() // consume separator
|
||||||
|
if tok != tokenSeparator {
|
||||||
|
return nil, p.mkerr(pos, "expected a field separator (`.`)")
|
||||||
|
}
|
||||||
|
|
||||||
|
f, err := p.field()
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
fs = append(fs, f)
|
||||||
|
default:
|
||||||
|
// let the layer above handle the other bad cases.
|
||||||
|
break loop
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return fs, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (p *parser) field() (string, error) {
|
||||||
|
pos, tok, s := p.scanner.scan()
|
||||||
|
switch tok {
|
||||||
|
case tokenField:
|
||||||
|
return s, nil
|
||||||
|
case tokenQuoted:
|
||||||
|
return p.unquote(pos, s)
|
||||||
|
}
|
||||||
|
|
||||||
|
return "", p.mkerr(pos, "expected field or quoted")
|
||||||
|
}
|
||||||
|
|
||||||
|
func (p *parser) operator() (operator, error) {
|
||||||
|
pos, tok, s := p.scanner.scan()
|
||||||
|
switch tok {
|
||||||
|
case tokenOperator:
|
||||||
|
switch s {
|
||||||
|
case "==":
|
||||||
|
return operatorEqual, nil
|
||||||
|
case "!=":
|
||||||
|
return operatorNotEqual, nil
|
||||||
|
case "~=":
|
||||||
|
return operatorMatches, nil
|
||||||
|
default:
|
||||||
|
return 0, p.mkerr(pos, "unsupported operator %q", s)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return 0, p.mkerr(pos, `expected an operator ("=="|"!="|"~=")`)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (p *parser) value() (string, error) {
|
||||||
|
pos, tok, s := p.scanner.scan()
|
||||||
|
|
||||||
|
switch tok {
|
||||||
|
case tokenValue, tokenField:
|
||||||
|
return s, nil
|
||||||
|
case tokenQuoted:
|
||||||
|
return p.unquote(pos, s)
|
||||||
|
}
|
||||||
|
|
||||||
|
return "", p.mkerr(pos, "expected value or quoted")
|
||||||
|
}
|
||||||
|
|
||||||
|
func (p *parser) unquote(pos int, s string) (string, error) {
|
||||||
|
uq, err := strconv.Unquote(s)
|
||||||
|
if err != nil {
|
||||||
|
return "", p.mkerr(pos, "unquoting failed: %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
return uq, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
type parseError struct {
|
||||||
|
input string
|
||||||
|
pos int
|
||||||
|
msg string
|
||||||
|
}
|
||||||
|
|
||||||
|
func (pe parseError) Error() string {
|
||||||
|
if pe.pos < len(pe.input) {
|
||||||
|
before := pe.input[:pe.pos]
|
||||||
|
location := pe.input[pe.pos : pe.pos+1] // need to handle end
|
||||||
|
after := pe.input[pe.pos+1:]
|
||||||
|
|
||||||
|
return fmt.Sprintf("[%s >|%s|< %s]: %v", before, location, after, pe.msg)
|
||||||
|
}
|
||||||
|
|
||||||
|
return fmt.Sprintf("[%s]: %v", pe.input, pe.msg)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (p *parser) mkerr(pos int, format string, args ...interface{}) error {
|
||||||
|
return errors.Wrap(parseError{
|
||||||
|
input: p.input,
|
||||||
|
pos: pos,
|
||||||
|
msg: fmt.Sprintf(format, args...),
|
||||||
|
}, "parse error")
|
||||||
|
}
|
268
vendor/github.com/containerd/containerd/filters/scanner.go
generated
vendored
Normal file
268
vendor/github.com/containerd/containerd/filters/scanner.go
generated
vendored
Normal file
@ -0,0 +1,268 @@
|
|||||||
|
package filters
|
||||||
|
|
||||||
|
import (
|
||||||
|
"fmt"
|
||||||
|
"unicode"
|
||||||
|
"unicode/utf8"
|
||||||
|
)
|
||||||
|
|
||||||
|
const (
|
||||||
|
tokenEOF = -(iota + 1)
|
||||||
|
tokenQuoted
|
||||||
|
tokenValue
|
||||||
|
tokenField
|
||||||
|
tokenSeparator
|
||||||
|
tokenOperator
|
||||||
|
tokenIllegal
|
||||||
|
)
|
||||||
|
|
||||||
|
type token rune
|
||||||
|
|
||||||
|
func (t token) String() string {
|
||||||
|
switch t {
|
||||||
|
case tokenEOF:
|
||||||
|
return "EOF"
|
||||||
|
case tokenQuoted:
|
||||||
|
return "Quoted"
|
||||||
|
case tokenValue:
|
||||||
|
return "Value"
|
||||||
|
case tokenField:
|
||||||
|
return "Field"
|
||||||
|
case tokenSeparator:
|
||||||
|
return "Separator"
|
||||||
|
case tokenOperator:
|
||||||
|
return "Operator"
|
||||||
|
case tokenIllegal:
|
||||||
|
return "Illegal"
|
||||||
|
}
|
||||||
|
|
||||||
|
return string(t)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (t token) GoString() string {
|
||||||
|
return "token" + t.String()
|
||||||
|
}
|
||||||
|
|
||||||
|
type scanner struct {
|
||||||
|
input string
|
||||||
|
pos int
|
||||||
|
ppos int // bounds the current rune in the string
|
||||||
|
value bool
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s *scanner) init(input string) {
|
||||||
|
s.input = input
|
||||||
|
s.pos = 0
|
||||||
|
s.ppos = 0
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s *scanner) next() rune {
|
||||||
|
if s.pos >= len(s.input) {
|
||||||
|
return tokenEOF
|
||||||
|
}
|
||||||
|
s.pos = s.ppos
|
||||||
|
|
||||||
|
r, w := utf8.DecodeRuneInString(s.input[s.ppos:])
|
||||||
|
s.ppos += w
|
||||||
|
if r == utf8.RuneError {
|
||||||
|
if w > 0 {
|
||||||
|
return tokenIllegal
|
||||||
|
} else {
|
||||||
|
return tokenEOF
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if r == 0 {
|
||||||
|
return tokenIllegal
|
||||||
|
}
|
||||||
|
|
||||||
|
return r
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s *scanner) peek() rune {
|
||||||
|
pos := s.pos
|
||||||
|
ppos := s.ppos
|
||||||
|
ch := s.next()
|
||||||
|
s.pos = pos
|
||||||
|
s.ppos = ppos
|
||||||
|
return ch
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s *scanner) scan() (int, token, string) {
|
||||||
|
var (
|
||||||
|
ch = s.next()
|
||||||
|
pos = s.pos
|
||||||
|
)
|
||||||
|
|
||||||
|
chomp:
|
||||||
|
switch {
|
||||||
|
case ch == tokenEOF:
|
||||||
|
case ch == tokenIllegal:
|
||||||
|
case isQuoteRune(ch):
|
||||||
|
s.scanQuoted(ch)
|
||||||
|
return pos, tokenQuoted, s.input[pos:s.ppos]
|
||||||
|
case isSeparatorRune(ch):
|
||||||
|
return pos, tokenSeparator, s.input[pos:s.ppos]
|
||||||
|
case isOperatorRune(ch):
|
||||||
|
s.scanOperator()
|
||||||
|
s.value = true
|
||||||
|
return pos, tokenOperator, s.input[pos:s.ppos]
|
||||||
|
case unicode.IsSpace(ch):
|
||||||
|
// chomp
|
||||||
|
ch = s.next()
|
||||||
|
pos = s.pos
|
||||||
|
goto chomp
|
||||||
|
case s.value:
|
||||||
|
s.scanValue()
|
||||||
|
s.value = false
|
||||||
|
return pos, tokenValue, s.input[pos:s.ppos]
|
||||||
|
case isFieldRune(ch):
|
||||||
|
s.scanField()
|
||||||
|
return pos, tokenField, s.input[pos:s.ppos]
|
||||||
|
}
|
||||||
|
|
||||||
|
return s.pos, token(ch), ""
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s *scanner) scanField() {
|
||||||
|
for {
|
||||||
|
ch := s.peek()
|
||||||
|
if !isFieldRune(ch) {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
s.next()
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s *scanner) scanOperator() {
|
||||||
|
for {
|
||||||
|
ch := s.peek()
|
||||||
|
switch ch {
|
||||||
|
case '=', '!', '~':
|
||||||
|
s.next()
|
||||||
|
default:
|
||||||
|
return
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s *scanner) scanValue() {
|
||||||
|
for {
|
||||||
|
ch := s.peek()
|
||||||
|
if !isValueRune(ch) {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
s.next()
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s *scanner) scanQuoted(quote rune) {
|
||||||
|
ch := s.next() // read character after quote
|
||||||
|
for ch != quote {
|
||||||
|
if ch == '\n' || ch < 0 {
|
||||||
|
s.error("literal not terminated")
|
||||||
|
return
|
||||||
|
}
|
||||||
|
if ch == '\\' {
|
||||||
|
ch = s.scanEscape(quote)
|
||||||
|
} else {
|
||||||
|
ch = s.next()
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s *scanner) scanEscape(quote rune) rune {
|
||||||
|
ch := s.next() // read character after '/'
|
||||||
|
switch ch {
|
||||||
|
case 'a', 'b', 'f', 'n', 'r', 't', 'v', '\\', quote:
|
||||||
|
// nothing to do
|
||||||
|
ch = s.next()
|
||||||
|
case '0', '1', '2', '3', '4', '5', '6', '7':
|
||||||
|
ch = s.scanDigits(ch, 8, 3)
|
||||||
|
case 'x':
|
||||||
|
ch = s.scanDigits(s.next(), 16, 2)
|
||||||
|
case 'u':
|
||||||
|
ch = s.scanDigits(s.next(), 16, 4)
|
||||||
|
case 'U':
|
||||||
|
ch = s.scanDigits(s.next(), 16, 8)
|
||||||
|
default:
|
||||||
|
s.error("illegal char escape")
|
||||||
|
}
|
||||||
|
return ch
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s *scanner) scanDigits(ch rune, base, n int) rune {
|
||||||
|
for n > 0 && digitVal(ch) < base {
|
||||||
|
ch = s.next()
|
||||||
|
n--
|
||||||
|
}
|
||||||
|
if n > 0 {
|
||||||
|
s.error("illegal char escape")
|
||||||
|
}
|
||||||
|
return ch
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s *scanner) error(msg string) {
|
||||||
|
fmt.Println("error fixme", msg)
|
||||||
|
}
|
||||||
|
|
||||||
|
func digitVal(ch rune) int {
|
||||||
|
switch {
|
||||||
|
case '0' <= ch && ch <= '9':
|
||||||
|
return int(ch - '0')
|
||||||
|
case 'a' <= ch && ch <= 'f':
|
||||||
|
return int(ch - 'a' + 10)
|
||||||
|
case 'A' <= ch && ch <= 'F':
|
||||||
|
return int(ch - 'A' + 10)
|
||||||
|
}
|
||||||
|
return 16 // larger than any legal digit val
|
||||||
|
}
|
||||||
|
|
||||||
|
func isFieldRune(r rune) bool {
|
||||||
|
return (r == '_' || isAlphaRune(r) || isDigitRune(r))
|
||||||
|
}
|
||||||
|
|
||||||
|
func isAlphaRune(r rune) bool {
|
||||||
|
return r >= 'A' && r <= 'Z' || r >= 'a' && r <= 'z'
|
||||||
|
}
|
||||||
|
|
||||||
|
func isDigitRune(r rune) bool {
|
||||||
|
return r >= '0' && r <= '9'
|
||||||
|
}
|
||||||
|
|
||||||
|
func isOperatorRune(r rune) bool {
|
||||||
|
switch r {
|
||||||
|
case '=', '!', '~':
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
|
||||||
|
func isQuoteRune(r rune) bool {
|
||||||
|
switch r {
|
||||||
|
case '"': // maybe add single quoting?
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
|
||||||
|
func isSeparatorRune(r rune) bool {
|
||||||
|
switch r {
|
||||||
|
case ',', '.':
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
|
||||||
|
func isValueRune(r rune) bool {
|
||||||
|
return r != ',' && !unicode.IsSpace(r) &&
|
||||||
|
(unicode.IsLetter(r) ||
|
||||||
|
unicode.IsDigit(r) ||
|
||||||
|
unicode.IsNumber(r) ||
|
||||||
|
unicode.IsGraphic(r) ||
|
||||||
|
unicode.IsPunct(r))
|
||||||
|
}
|
120
vendor/github.com/containerd/containerd/fs/copy.go
generated
vendored
Normal file
120
vendor/github.com/containerd/containerd/fs/copy.go
generated
vendored
Normal file
@ -0,0 +1,120 @@
|
|||||||
|
package fs
|
||||||
|
|
||||||
|
import (
|
||||||
|
"io/ioutil"
|
||||||
|
"os"
|
||||||
|
"path/filepath"
|
||||||
|
"sync"
|
||||||
|
|
||||||
|
"github.com/pkg/errors"
|
||||||
|
)
|
||||||
|
|
||||||
|
var (
|
||||||
|
bufferPool = &sync.Pool{
|
||||||
|
New: func() interface{} {
|
||||||
|
return make([]byte, 32*1024)
|
||||||
|
},
|
||||||
|
}
|
||||||
|
)
|
||||||
|
|
||||||
|
// CopyDir copies the directory from src to dst.
|
||||||
|
// Most efficient copy of files is attempted.
|
||||||
|
func CopyDir(dst, src string) error {
|
||||||
|
inodes := map[uint64]string{}
|
||||||
|
return copyDirectory(dst, src, inodes)
|
||||||
|
}
|
||||||
|
|
||||||
|
func copyDirectory(dst, src string, inodes map[uint64]string) error {
|
||||||
|
stat, err := os.Stat(src)
|
||||||
|
if err != nil {
|
||||||
|
return errors.Wrapf(err, "failed to stat %s", src)
|
||||||
|
}
|
||||||
|
if !stat.IsDir() {
|
||||||
|
return errors.Errorf("source is not directory")
|
||||||
|
}
|
||||||
|
|
||||||
|
if st, err := os.Stat(dst); err != nil {
|
||||||
|
if err := os.Mkdir(dst, stat.Mode()); err != nil {
|
||||||
|
return errors.Wrapf(err, "failed to mkdir %s", dst)
|
||||||
|
}
|
||||||
|
} else if !st.IsDir() {
|
||||||
|
return errors.Errorf("cannot copy to non-directory: %s", dst)
|
||||||
|
} else {
|
||||||
|
if err := os.Chmod(dst, stat.Mode()); err != nil {
|
||||||
|
return errors.Wrapf(err, "failed to chmod on %s", dst)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
fis, err := ioutil.ReadDir(src)
|
||||||
|
if err != nil {
|
||||||
|
return errors.Wrapf(err, "failed to read %s", src)
|
||||||
|
}
|
||||||
|
|
||||||
|
if err := copyFileInfo(stat, dst); err != nil {
|
||||||
|
return errors.Wrapf(err, "failed to copy file info for %s", dst)
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, fi := range fis {
|
||||||
|
source := filepath.Join(src, fi.Name())
|
||||||
|
target := filepath.Join(dst, fi.Name())
|
||||||
|
|
||||||
|
switch {
|
||||||
|
case fi.IsDir():
|
||||||
|
if err := copyDirectory(target, source, inodes); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
continue
|
||||||
|
case (fi.Mode() & os.ModeType) == 0:
|
||||||
|
link, err := getLinkSource(target, fi, inodes)
|
||||||
|
if err != nil {
|
||||||
|
return errors.Wrap(err, "failed to get hardlink")
|
||||||
|
}
|
||||||
|
if link != "" {
|
||||||
|
if err := os.Link(link, target); err != nil {
|
||||||
|
return errors.Wrap(err, "failed to create hard link")
|
||||||
|
}
|
||||||
|
} else if err := copyFile(source, target); err != nil {
|
||||||
|
return errors.Wrap(err, "failed to copy files")
|
||||||
|
}
|
||||||
|
case (fi.Mode() & os.ModeSymlink) == os.ModeSymlink:
|
||||||
|
link, err := os.Readlink(source)
|
||||||
|
if err != nil {
|
||||||
|
return errors.Wrapf(err, "failed to read link: %s", source)
|
||||||
|
}
|
||||||
|
if err := os.Symlink(link, target); err != nil {
|
||||||
|
return errors.Wrapf(err, "failed to create symlink: %s", target)
|
||||||
|
}
|
||||||
|
case (fi.Mode() & os.ModeDevice) == os.ModeDevice:
|
||||||
|
if err := copyDevice(target, fi); err != nil {
|
||||||
|
return errors.Wrapf(err, "failed to create device")
|
||||||
|
}
|
||||||
|
default:
|
||||||
|
// TODO: Support pipes and sockets
|
||||||
|
return errors.Wrapf(err, "unsupported mode %s", fi.Mode())
|
||||||
|
}
|
||||||
|
if err := copyFileInfo(fi, target); err != nil {
|
||||||
|
return errors.Wrap(err, "failed to copy file info")
|
||||||
|
}
|
||||||
|
|
||||||
|
if err := copyXAttrs(target, source); err != nil {
|
||||||
|
return errors.Wrap(err, "failed to copy xattrs")
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func copyFile(source, target string) error {
|
||||||
|
src, err := os.Open(source)
|
||||||
|
if err != nil {
|
||||||
|
return errors.Wrapf(err, "failed to open source %s", source)
|
||||||
|
}
|
||||||
|
defer src.Close()
|
||||||
|
tgt, err := os.Create(target)
|
||||||
|
if err != nil {
|
||||||
|
return errors.Wrapf(err, "failed to open target %s", target)
|
||||||
|
}
|
||||||
|
defer tgt.Close()
|
||||||
|
|
||||||
|
return copyFileContent(tgt, src)
|
||||||
|
}
|
82
vendor/github.com/containerd/containerd/fs/copy_linux.go
generated
vendored
Normal file
82
vendor/github.com/containerd/containerd/fs/copy_linux.go
generated
vendored
Normal file
@ -0,0 +1,82 @@
|
|||||||
|
package fs
|
||||||
|
|
||||||
|
import (
|
||||||
|
"io"
|
||||||
|
"os"
|
||||||
|
"syscall"
|
||||||
|
|
||||||
|
"github.com/containerd/continuity/sysx"
|
||||||
|
"github.com/pkg/errors"
|
||||||
|
"golang.org/x/sys/unix"
|
||||||
|
)
|
||||||
|
|
||||||
|
func copyFileInfo(fi os.FileInfo, name string) error {
|
||||||
|
st := fi.Sys().(*syscall.Stat_t)
|
||||||
|
if err := os.Lchown(name, int(st.Uid), int(st.Gid)); err != nil {
|
||||||
|
return errors.Wrapf(err, "failed to chown %s", name)
|
||||||
|
}
|
||||||
|
|
||||||
|
if (fi.Mode() & os.ModeSymlink) != os.ModeSymlink {
|
||||||
|
if err := os.Chmod(name, fi.Mode()); err != nil {
|
||||||
|
return errors.Wrapf(err, "failed to chmod %s", name)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
timespec := []unix.Timespec{unix.Timespec(st.Atim), unix.Timespec(st.Mtim)}
|
||||||
|
if err := unix.UtimesNanoAt(unix.AT_FDCWD, name, timespec, unix.AT_SYMLINK_NOFOLLOW); err != nil {
|
||||||
|
return errors.Wrapf(err, "failed to utime %s", name)
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func copyFileContent(dst, src *os.File) error {
|
||||||
|
st, err := src.Stat()
|
||||||
|
if err != nil {
|
||||||
|
return errors.Wrap(err, "unable to stat source")
|
||||||
|
}
|
||||||
|
|
||||||
|
n, err := sysx.CopyFileRange(src.Fd(), nil, dst.Fd(), nil, int(st.Size()), 0)
|
||||||
|
if err != nil {
|
||||||
|
if err != syscall.ENOSYS && err != syscall.EXDEV {
|
||||||
|
return errors.Wrap(err, "copy file range failed")
|
||||||
|
}
|
||||||
|
|
||||||
|
buf := bufferPool.Get().([]byte)
|
||||||
|
_, err = io.CopyBuffer(dst, src, buf)
|
||||||
|
bufferPool.Put(buf)
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
if int64(n) != st.Size() {
|
||||||
|
return errors.Wrapf(err, "short copy: %d of %d", int64(n), st.Size())
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func copyXAttrs(dst, src string) error {
|
||||||
|
xattrKeys, err := sysx.LListxattr(src)
|
||||||
|
if err != nil {
|
||||||
|
return errors.Wrapf(err, "failed to list xattrs on %s", src)
|
||||||
|
}
|
||||||
|
for _, xattr := range xattrKeys {
|
||||||
|
data, err := sysx.LGetxattr(src, xattr)
|
||||||
|
if err != nil {
|
||||||
|
return errors.Wrapf(err, "failed to get xattr %q on %s", xattr, src)
|
||||||
|
}
|
||||||
|
if err := sysx.LSetxattr(dst, xattr, data, 0); err != nil {
|
||||||
|
return errors.Wrapf(err, "failed to set xattr %q on %s", xattr, dst)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func copyDevice(dst string, fi os.FileInfo) error {
|
||||||
|
st, ok := fi.Sys().(*syscall.Stat_t)
|
||||||
|
if !ok {
|
||||||
|
return errors.New("unsupported stat type")
|
||||||
|
}
|
||||||
|
return syscall.Mknod(dst, uint32(fi.Mode()), int(st.Rdev))
|
||||||
|
}
|
65
vendor/github.com/containerd/containerd/fs/copy_unix.go
generated
vendored
Normal file
65
vendor/github.com/containerd/containerd/fs/copy_unix.go
generated
vendored
Normal file
@ -0,0 +1,65 @@
|
|||||||
|
// +build darwin freebsd
|
||||||
|
|
||||||
|
package fs
|
||||||
|
|
||||||
|
import (
|
||||||
|
"io"
|
||||||
|
"os"
|
||||||
|
"syscall"
|
||||||
|
|
||||||
|
"github.com/containerd/continuity/sysx"
|
||||||
|
"github.com/pkg/errors"
|
||||||
|
)
|
||||||
|
|
||||||
|
func copyFileInfo(fi os.FileInfo, name string) error {
|
||||||
|
st := fi.Sys().(*syscall.Stat_t)
|
||||||
|
if err := os.Lchown(name, int(st.Uid), int(st.Gid)); err != nil {
|
||||||
|
return errors.Wrapf(err, "failed to chown %s", name)
|
||||||
|
}
|
||||||
|
|
||||||
|
if (fi.Mode() & os.ModeSymlink) != os.ModeSymlink {
|
||||||
|
if err := os.Chmod(name, fi.Mode()); err != nil {
|
||||||
|
return errors.Wrapf(err, "failed to chmod %s", name)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if err := syscall.UtimesNano(name, []syscall.Timespec{st.Atimespec, st.Mtimespec}); err != nil {
|
||||||
|
return errors.Wrapf(err, "failed to utime %s", name)
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func copyFileContent(dst, src *os.File) error {
|
||||||
|
buf := bufferPool.Get().([]byte)
|
||||||
|
_, err := io.CopyBuffer(dst, src, buf)
|
||||||
|
bufferPool.Put(buf)
|
||||||
|
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
func copyXAttrs(dst, src string) error {
|
||||||
|
xattrKeys, err := sysx.LListxattr(src)
|
||||||
|
if err != nil {
|
||||||
|
return errors.Wrapf(err, "failed to list xattrs on %s", src)
|
||||||
|
}
|
||||||
|
for _, xattr := range xattrKeys {
|
||||||
|
data, err := sysx.LGetxattr(src, xattr)
|
||||||
|
if err != nil {
|
||||||
|
return errors.Wrapf(err, "failed to get xattr %q on %s", xattr, src)
|
||||||
|
}
|
||||||
|
if err := sysx.LSetxattr(dst, xattr, data, 0); err != nil {
|
||||||
|
return errors.Wrapf(err, "failed to set xattr %q on %s", xattr, dst)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func copyDevice(dst string, fi os.FileInfo) error {
|
||||||
|
st, ok := fi.Sys().(*syscall.Stat_t)
|
||||||
|
if !ok {
|
||||||
|
return errors.New("unsupported stat type")
|
||||||
|
}
|
||||||
|
return syscall.Mknod(dst, uint32(fi.Mode()), int(st.Rdev))
|
||||||
|
}
|
33
vendor/github.com/containerd/containerd/fs/copy_windows.go
generated
vendored
Normal file
33
vendor/github.com/containerd/containerd/fs/copy_windows.go
generated
vendored
Normal file
@ -0,0 +1,33 @@
|
|||||||
|
package fs
|
||||||
|
|
||||||
|
import (
|
||||||
|
"io"
|
||||||
|
"os"
|
||||||
|
|
||||||
|
"github.com/pkg/errors"
|
||||||
|
)
|
||||||
|
|
||||||
|
func copyFileInfo(fi os.FileInfo, name string) error {
|
||||||
|
if err := os.Chmod(name, fi.Mode()); err != nil {
|
||||||
|
return errors.Wrapf(err, "failed to chmod %s", name)
|
||||||
|
}
|
||||||
|
|
||||||
|
// TODO: copy windows specific metadata
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func copyFileContent(dst, src *os.File) error {
|
||||||
|
buf := bufferPool.Get().([]byte)
|
||||||
|
_, err := io.CopyBuffer(dst, src, buf)
|
||||||
|
bufferPool.Put(buf)
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
func copyXAttrs(dst, src string) error {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func copyDevice(dst string, fi os.FileInfo) error {
|
||||||
|
return errors.New("device copy not supported")
|
||||||
|
}
|