Merge pull request #110 from Random-Liu/use-vndr
Use vndr instead of godep.
commit f0e6fe3251
@@ -23,4 +23,5 @@ Follow either of the two links above to access the appropriate CLA and instructions.

### Adding dependencies

-If your patch depends on new packages, add that package with [`godep`](https://github.com/tools/godep). Follow the [instructions to add a dependency](https://github.com/kubernetes/community/blob/master/contributors/devel/development.md#dependency-management).
+If your patch depends on new packages, add that package with [`vndr`](https://github.com/LK4D4/vndr). Please
+modify `vendor.conf` and run the `vndr` tool to update the `vendor/` directory.
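For contributors new to `vndr`, the workflow described above can be sketched as follows. This is a minimal example rather than part of the original patch: the dependency path and version are placeholders, and it assumes a working Go toolchain with the `vndr` binary on `PATH`.

```sh
# Install the vndr tool (assumes a standard GOPATH-based Go setup).
go get github.com/LK4D4/vndr

# Declare the new dependency: each vendor.conf line is an import path
# followed by a tag or commit hash (placeholder values shown here).
echo 'github.com/example/dependency v1.0.0' >> vendor.conf

# Re-vendor: vndr reads vendor.conf and repopulates the vendor/ directory.
vndr

# Commit the vendor.conf change together with the regenerated vendor/ tree.
git add vendor.conf vendor/
git commit -s -m "Add github.com/example/dependency v1.0.0"
```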
Godeps/Godeps.json (generated, 569 lines deleted)
@@ -1,569 +0,0 @@
|
||||
{
|
||||
"ImportPath": "github.com/kubernetes-incubator/cri-containerd",
|
||||
"GoVersion": "go1.8",
|
||||
"GodepVersion": "v79",
|
||||
"Packages": [
|
||||
"./..."
|
||||
],
|
||||
"Deps": [
|
||||
{
|
||||
"ImportPath": "github.com/Microsoft/go-winio",
|
||||
"Comment": "v0.4.4",
|
||||
"Rev": "7ff89941bcb93df2e962467fb073c6e997b13cf0"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/blang/semver",
|
||||
"Comment": "v3.1.0",
|
||||
"Rev": "aea32c919a18e5ef4537bbd283ff29594b1b0165"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/boltdb/bolt",
|
||||
"Comment": "v1.3.0-58-ge9cf4fa",
|
||||
"Rev": "e9cf4fae01b5a8ff89d0ec6b32f0d9c9f79aefdd"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/containerd/containerd",
|
||||
"Comment": "v1.0.0-alpha2-24-g2386062",
|
||||
"Rev": "2386062ce152d6f158d22be5991fe11c7cf67535"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/containerd/containerd/api/services/containers/v1",
|
||||
"Comment": "v1.0.0-alpha2-24-g2386062",
|
||||
"Rev": "2386062ce152d6f158d22be5991fe11c7cf67535"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/containerd/containerd/api/services/content/v1",
|
||||
"Comment": "v1.0.0-alpha2-24-g2386062",
|
||||
"Rev": "2386062ce152d6f158d22be5991fe11c7cf67535"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/containerd/containerd/api/services/diff/v1",
|
||||
"Comment": "v1.0.0-alpha2-24-g2386062",
|
||||
"Rev": "2386062ce152d6f158d22be5991fe11c7cf67535"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/containerd/containerd/api/services/events/v1",
|
||||
"Comment": "v1.0.0-alpha2-24-g2386062",
|
||||
"Rev": "2386062ce152d6f158d22be5991fe11c7cf67535"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/containerd/containerd/api/services/images/v1",
|
||||
"Comment": "v1.0.0-alpha2-24-g2386062",
|
||||
"Rev": "2386062ce152d6f158d22be5991fe11c7cf67535"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/containerd/containerd/api/services/namespaces/v1",
|
||||
"Comment": "v1.0.0-alpha2-24-g2386062",
|
||||
"Rev": "2386062ce152d6f158d22be5991fe11c7cf67535"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/containerd/containerd/api/services/snapshot/v1",
|
||||
"Comment": "v1.0.0-alpha2-24-g2386062",
|
||||
"Rev": "2386062ce152d6f158d22be5991fe11c7cf67535"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/containerd/containerd/api/services/tasks/v1",
|
||||
"Comment": "v1.0.0-alpha2-24-g2386062",
|
||||
"Rev": "2386062ce152d6f158d22be5991fe11c7cf67535"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/containerd/containerd/api/services/version/v1",
|
||||
"Comment": "v1.0.0-alpha2-24-g2386062",
|
||||
"Rev": "2386062ce152d6f158d22be5991fe11c7cf67535"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/containerd/containerd/api/types",
|
||||
"Comment": "v1.0.0-alpha2-24-g2386062",
|
||||
"Rev": "2386062ce152d6f158d22be5991fe11c7cf67535"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/containerd/containerd/api/types/task",
|
||||
"Comment": "v1.0.0-alpha2-24-g2386062",
|
||||
"Rev": "2386062ce152d6f158d22be5991fe11c7cf67535"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/containerd/containerd/containers",
|
||||
"Comment": "v1.0.0-alpha2-24-g2386062",
|
||||
"Rev": "2386062ce152d6f158d22be5991fe11c7cf67535"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/containerd/containerd/content",
|
||||
"Comment": "v1.0.0-alpha2-24-g2386062",
|
||||
"Rev": "2386062ce152d6f158d22be5991fe11c7cf67535"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/containerd/containerd/errdefs",
|
||||
"Comment": "v1.0.0-alpha2-24-g2386062",
|
||||
"Rev": "2386062ce152d6f158d22be5991fe11c7cf67535"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/containerd/containerd/events",
|
||||
"Comment": "v1.0.0-alpha2-24-g2386062",
|
||||
"Rev": "2386062ce152d6f158d22be5991fe11c7cf67535"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/containerd/containerd/filters",
|
||||
"Comment": "v1.0.0-alpha2-24-g2386062",
|
||||
"Rev": "2386062ce152d6f158d22be5991fe11c7cf67535"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/containerd/containerd/fs",
|
||||
"Comment": "v1.0.0-alpha2-24-g2386062",
|
||||
"Rev": "2386062ce152d6f158d22be5991fe11c7cf67535"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/containerd/containerd/identifiers",
|
||||
"Comment": "v1.0.0-alpha2-24-g2386062",
|
||||
"Rev": "2386062ce152d6f158d22be5991fe11c7cf67535"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/containerd/containerd/images",
|
||||
"Comment": "v1.0.0-alpha2-24-g2386062",
|
||||
"Rev": "2386062ce152d6f158d22be5991fe11c7cf67535"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/containerd/containerd/linux/runcopts",
|
||||
"Comment": "v1.0.0-alpha2-24-g2386062",
|
||||
"Rev": "2386062ce152d6f158d22be5991fe11c7cf67535"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/containerd/containerd/log",
|
||||
"Comment": "v1.0.0-alpha2-24-g2386062",
|
||||
"Rev": "2386062ce152d6f158d22be5991fe11c7cf67535"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/containerd/containerd/metadata",
|
||||
"Comment": "v1.0.0-alpha2-24-g2386062",
|
||||
"Rev": "2386062ce152d6f158d22be5991fe11c7cf67535"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/containerd/containerd/mount",
|
||||
"Comment": "v1.0.0-alpha2-24-g2386062",
|
||||
"Rev": "2386062ce152d6f158d22be5991fe11c7cf67535"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/containerd/containerd/namespaces",
|
||||
"Comment": "v1.0.0-alpha2-24-g2386062",
|
||||
"Rev": "2386062ce152d6f158d22be5991fe11c7cf67535"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/containerd/containerd/oci",
|
||||
"Comment": "v1.0.0-alpha2-24-g2386062",
|
||||
"Rev": "2386062ce152d6f158d22be5991fe11c7cf67535"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/containerd/containerd/plugin",
|
||||
"Comment": "v1.0.0-alpha2-24-g2386062",
|
||||
"Rev": "2386062ce152d6f158d22be5991fe11c7cf67535"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/containerd/containerd/reference",
|
||||
"Comment": "v1.0.0-alpha2-24-g2386062",
|
||||
"Rev": "2386062ce152d6f158d22be5991fe11c7cf67535"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/containerd/containerd/remotes",
|
||||
"Comment": "v1.0.0-alpha2-24-g2386062",
|
||||
"Rev": "2386062ce152d6f158d22be5991fe11c7cf67535"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/containerd/containerd/remotes/docker",
|
||||
"Comment": "v1.0.0-alpha2-24-g2386062",
|
||||
"Rev": "2386062ce152d6f158d22be5991fe11c7cf67535"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/containerd/containerd/remotes/docker/schema1",
|
||||
"Comment": "v1.0.0-alpha2-24-g2386062",
|
||||
"Rev": "2386062ce152d6f158d22be5991fe11c7cf67535"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/containerd/containerd/rootfs",
|
||||
"Comment": "v1.0.0-alpha2-24-g2386062",
|
||||
"Rev": "2386062ce152d6f158d22be5991fe11c7cf67535"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/containerd/containerd/services/content",
|
||||
"Comment": "v1.0.0-alpha2-24-g2386062",
|
||||
"Rev": "2386062ce152d6f158d22be5991fe11c7cf67535"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/containerd/containerd/services/diff",
|
||||
"Comment": "v1.0.0-alpha2-24-g2386062",
|
||||
"Rev": "2386062ce152d6f158d22be5991fe11c7cf67535"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/containerd/containerd/services/images",
|
||||
"Comment": "v1.0.0-alpha2-24-g2386062",
|
||||
"Rev": "2386062ce152d6f158d22be5991fe11c7cf67535"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/containerd/containerd/services/snapshot",
|
||||
"Comment": "v1.0.0-alpha2-24-g2386062",
|
||||
"Rev": "2386062ce152d6f158d22be5991fe11c7cf67535"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/containerd/containerd/snapshot",
|
||||
"Comment": "v1.0.0-alpha2-24-g2386062",
|
||||
"Rev": "2386062ce152d6f158d22be5991fe11c7cf67535"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/containerd/containerd/typeurl",
|
||||
"Comment": "v1.0.0-alpha2-24-g2386062",
|
||||
"Rev": "2386062ce152d6f158d22be5991fe11c7cf67535"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/containerd/continuity/sysx",
|
||||
"Rev": "86cec1535a968310e7532819f699ff2830ed7463"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/containerd/fifo",
|
||||
"Rev": "fbfb6a11ec671efbe94ad1c12c2e98773f19e1e6"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/containernetworking/cni/libcni",
|
||||
"Comment": "v0.4.0",
|
||||
"Rev": "530dd71e193012895ff7beee42cd64a02e1993da"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/containernetworking/cni/pkg/invoke",
|
||||
"Comment": "v0.4.0",
|
||||
"Rev": "530dd71e193012895ff7beee42cd64a02e1993da"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/containernetworking/cni/pkg/types",
|
||||
"Comment": "v0.4.0",
|
||||
"Rev": "530dd71e193012895ff7beee42cd64a02e1993da"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/containernetworking/cni/pkg/version",
|
||||
"Comment": "v0.4.0",
|
||||
"Rev": "530dd71e193012895ff7beee42cd64a02e1993da"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/davecgh/go-spew/spew",
|
||||
"Comment": "v1.1.0",
|
||||
"Rev": "346938d642f2ec3594ed81d874461961cd0faa76"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/docker/distribution/digestset",
|
||||
"Comment": "v2.6.0-rc.1-130-gb38e583",
|
||||
"Rev": "b38e5838b7b2f2ad48e06ec4b500011976080621"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/docker/distribution/reference",
|
||||
"Comment": "v2.6.0-rc.1-130-gb38e583",
|
||||
"Rev": "b38e5838b7b2f2ad48e06ec4b500011976080621"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/docker/docker/pkg/mount",
|
||||
"Comment": "v1.13.1",
|
||||
"Rev": "092cba3727bb9b4a2f0e922cd6c0f93ea270e363"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/docker/docker/pkg/random",
|
||||
"Comment": "v1.13.1",
|
||||
"Rev": "092cba3727bb9b4a2f0e922cd6c0f93ea270e363"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/docker/docker/pkg/signal",
|
||||
"Comment": "v1.13.1",
|
||||
"Rev": "092cba3727bb9b4a2f0e922cd6c0f93ea270e363"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/docker/docker/pkg/stringid",
|
||||
"Comment": "v1.13.1",
|
||||
"Rev": "092cba3727bb9b4a2f0e922cd6c0f93ea270e363"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/docker/go-events",
|
||||
"Rev": "9461782956ad83b30282bf90e31fa6a70c255ba9"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/fsnotify/fsnotify",
|
||||
"Comment": "v1.4.2-4-g7d7316e",
|
||||
"Rev": "7d7316ed6e1ed2de075aab8dfc76de5d158d66e1"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/gogo/protobuf/gogoproto",
|
||||
"Comment": "v0.3-150-gd2e1ade",
|
||||
"Rev": "d2e1ade2d719b78fe5b061b4c18a9f7111b5bdc8"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/gogo/protobuf/proto",
|
||||
"Comment": "v0.3-150-gd2e1ade",
|
||||
"Rev": "d2e1ade2d719b78fe5b061b4c18a9f7111b5bdc8"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/gogo/protobuf/protoc-gen-gogo/descriptor",
|
||||
"Comment": "v0.3-150-gd2e1ade",
|
||||
"Rev": "d2e1ade2d719b78fe5b061b4c18a9f7111b5bdc8"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/gogo/protobuf/sortkeys",
|
||||
"Comment": "v0.3-150-gd2e1ade",
|
||||
"Rev": "d2e1ade2d719b78fe5b061b4c18a9f7111b5bdc8"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/gogo/protobuf/types",
|
||||
"Comment": "v0.3-150-gd2e1ade",
|
||||
"Rev": "d2e1ade2d719b78fe5b061b4c18a9f7111b5bdc8"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/golang/glog",
|
||||
"Rev": "44145f04b68cf362d9c4df2182967c2275eaefed"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/golang/mock/gomock",
|
||||
"Rev": "bd3c8e81be01eef76d4b503f5e687d2d1354d2d9"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/golang/protobuf/proto",
|
||||
"Rev": "5a0f697c9ed9d68fef0116532c6e05cfeae00e55"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/golang/protobuf/ptypes/any",
|
||||
"Rev": "5a0f697c9ed9d68fef0116532c6e05cfeae00e55"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/golang/protobuf/ptypes/empty",
|
||||
"Rev": "5a0f697c9ed9d68fef0116532c6e05cfeae00e55"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/jpillora/backoff",
|
||||
"Rev": "06c7a16c845dc8e0bf575fafeeca0f5462f5eb4d"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/kubernetes-incubator/cri-o/pkg/ocicni",
|
||||
"Comment": "v0.3",
|
||||
"Rev": "f648cd6e60948e4da391040e5c75d8175fea4fb7"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/opencontainers/go-digest",
|
||||
"Rev": "21dfd564fd89c944783d00d069f33e3e7123c448"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/opencontainers/image-spec/identity",
|
||||
"Comment": "v1.0.0-rc6-12-g372ad78",
|
||||
"Rev": "372ad780f63454fbbbbcc7cf80e5b90245c13e13"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/opencontainers/image-spec/specs-go",
|
||||
"Comment": "v1.0.0-rc6-12-g372ad78",
|
||||
"Rev": "372ad780f63454fbbbbcc7cf80e5b90245c13e13"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/opencontainers/image-spec/specs-go/v1",
|
||||
"Comment": "v1.0.0-rc6-12-g372ad78",
|
||||
"Rev": "372ad780f63454fbbbbcc7cf80e5b90245c13e13"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/opencontainers/runc/libcontainer/configs",
|
||||
"Comment": "v1.0.0-rc3-161-ge775f0f",
|
||||
"Rev": "e775f0fba3ea329b8b766451c892c41a3d49594d"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/opencontainers/runc/libcontainer/devices",
|
||||
"Comment": "v1.0.0-rc3-161-ge775f0f",
|
||||
"Rev": "e775f0fba3ea329b8b766451c892c41a3d49594d"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/opencontainers/runtime-spec/specs-go",
|
||||
"Comment": "v1.0.0",
|
||||
"Rev": "02137cd4e50b37a01665e1731fcd4ac2d2178230"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/opencontainers/runtime-tools/generate",
|
||||
"Rev": "e29f3ca4eb806a582ee1a1864c7b0563bd64c19b"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/opencontainers/runtime-tools/generate/seccomp",
|
||||
"Rev": "e29f3ca4eb806a582ee1a1864c7b0563bd64c19b"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/opencontainers/runtime-tools/validate",
|
||||
"Rev": "e29f3ca4eb806a582ee1a1864c7b0563bd64c19b"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/pkg/errors",
|
||||
"Comment": "v0.8.0",
|
||||
"Rev": "645ef00459ed84a119197bfb8d8205042c6df63d"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/pmezard/go-difflib/difflib",
|
||||
"Comment": "v1.0.0",
|
||||
"Rev": "792786c7400a136282c1664665ae0a8db921c6c2"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/sirupsen/logrus",
|
||||
"Comment": "v1.0.0",
|
||||
"Rev": "202f25545ea4cf9b191ff7f846df5d87c9382c2b"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/spf13/pflag",
|
||||
"Rev": "9ff6c6923cfffbcd502984b8e0c80539a94968b7"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/stretchr/testify/assert",
|
||||
"Comment": "v1.1.4",
|
||||
"Rev": "69483b4bd14f5845b5a1e55bca19e954e827f1d0"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/stretchr/testify/require",
|
||||
"Comment": "v1.1.4",
|
||||
"Rev": "69483b4bd14f5845b5a1e55bca19e954e827f1d0"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/syndtr/gocapability/capability",
|
||||
"Rev": "e7cb7fa329f456b3855136a2642b197bad7366ba"
|
||||
},
|
||||
{
|
||||
"ImportPath": "golang.org/x/net/context",
|
||||
"Rev": "7dcfb8076726a3fdd9353b6b8a1f1b6be6811bd6"
|
||||
},
|
||||
{
|
||||
"ImportPath": "golang.org/x/net/context/ctxhttp",
|
||||
"Rev": "7dcfb8076726a3fdd9353b6b8a1f1b6be6811bd6"
|
||||
},
|
||||
{
|
||||
"ImportPath": "golang.org/x/net/http2",
|
||||
"Rev": "7dcfb8076726a3fdd9353b6b8a1f1b6be6811bd6"
|
||||
},
|
||||
{
|
||||
"ImportPath": "golang.org/x/net/http2/hpack",
|
||||
"Rev": "7dcfb8076726a3fdd9353b6b8a1f1b6be6811bd6"
|
||||
},
|
||||
{
|
||||
"ImportPath": "golang.org/x/net/idna",
|
||||
"Rev": "7dcfb8076726a3fdd9353b6b8a1f1b6be6811bd6"
|
||||
},
|
||||
{
|
||||
"ImportPath": "golang.org/x/net/internal/timeseries",
|
||||
"Rev": "7dcfb8076726a3fdd9353b6b8a1f1b6be6811bd6"
|
||||
},
|
||||
{
|
||||
"ImportPath": "golang.org/x/net/lex/httplex",
|
||||
"Rev": "7dcfb8076726a3fdd9353b6b8a1f1b6be6811bd6"
|
||||
},
|
||||
{
|
||||
"ImportPath": "golang.org/x/net/trace",
|
||||
"Rev": "7dcfb8076726a3fdd9353b6b8a1f1b6be6811bd6"
|
||||
},
|
||||
{
|
||||
"ImportPath": "golang.org/x/sync/errgroup",
|
||||
"Rev": "450f422ab23cf9881c94e2db30cac0eb1b7cf80c"
|
||||
},
|
||||
{
|
||||
"ImportPath": "golang.org/x/sys/unix",
|
||||
"Rev": "739734461d1c916b6c72a63d7efda2b27edb369f"
|
||||
},
|
||||
{
|
||||
"ImportPath": "golang.org/x/sys/windows",
|
||||
"Rev": "739734461d1c916b6c72a63d7efda2b27edb369f"
|
||||
},
|
||||
{
|
||||
"ImportPath": "golang.org/x/text/secure/bidirule",
|
||||
"Rev": "19e51611da83d6be54ddafce4a4af510cb3e9ea4"
|
||||
},
|
||||
{
|
||||
"ImportPath": "golang.org/x/text/transform",
|
||||
"Rev": "19e51611da83d6be54ddafce4a4af510cb3e9ea4"
|
||||
},
|
||||
{
|
||||
"ImportPath": "golang.org/x/text/unicode/bidi",
|
||||
"Rev": "19e51611da83d6be54ddafce4a4af510cb3e9ea4"
|
||||
},
|
||||
{
|
||||
"ImportPath": "golang.org/x/text/unicode/norm",
|
||||
"Rev": "19e51611da83d6be54ddafce4a4af510cb3e9ea4"
|
||||
},
|
||||
{
|
||||
"ImportPath": "google.golang.org/genproto/googleapis/rpc/status",
|
||||
"Rev": "d80a6e20e776b0b17a324d0ba1ab50a39c8e8944"
|
||||
},
|
||||
{
|
||||
"ImportPath": "google.golang.org/grpc",
|
||||
"Comment": "v1.3.0",
|
||||
"Rev": "d2e1b51f33ff8c5e4a15560ff049d200e83726c5"
|
||||
},
|
||||
{
|
||||
"ImportPath": "google.golang.org/grpc/codes",
|
||||
"Comment": "v1.3.0",
|
||||
"Rev": "d2e1b51f33ff8c5e4a15560ff049d200e83726c5"
|
||||
},
|
||||
{
|
||||
"ImportPath": "google.golang.org/grpc/credentials",
|
||||
"Comment": "v1.3.0",
|
||||
"Rev": "d2e1b51f33ff8c5e4a15560ff049d200e83726c5"
|
||||
},
|
||||
{
|
||||
"ImportPath": "google.golang.org/grpc/grpclb/grpc_lb_v1",
|
||||
"Comment": "v1.3.0",
|
||||
"Rev": "d2e1b51f33ff8c5e4a15560ff049d200e83726c5"
|
||||
},
|
||||
{
|
||||
"ImportPath": "google.golang.org/grpc/grpclog",
|
||||
"Comment": "v1.3.0",
|
||||
"Rev": "d2e1b51f33ff8c5e4a15560ff049d200e83726c5"
|
||||
},
|
||||
{
|
||||
"ImportPath": "google.golang.org/grpc/health/grpc_health_v1",
|
||||
"Comment": "v1.3.0",
|
||||
"Rev": "d2e1b51f33ff8c5e4a15560ff049d200e83726c5"
|
||||
},
|
||||
{
|
||||
"ImportPath": "google.golang.org/grpc/internal",
|
||||
"Comment": "v1.3.0",
|
||||
"Rev": "d2e1b51f33ff8c5e4a15560ff049d200e83726c5"
|
||||
},
|
||||
{
|
||||
"ImportPath": "google.golang.org/grpc/keepalive",
|
||||
"Comment": "v1.3.0",
|
||||
"Rev": "d2e1b51f33ff8c5e4a15560ff049d200e83726c5"
|
||||
},
|
||||
{
|
||||
"ImportPath": "google.golang.org/grpc/metadata",
|
||||
"Comment": "v1.3.0",
|
||||
"Rev": "d2e1b51f33ff8c5e4a15560ff049d200e83726c5"
|
||||
},
|
||||
{
|
||||
"ImportPath": "google.golang.org/grpc/naming",
|
||||
"Comment": "v1.3.0",
|
||||
"Rev": "d2e1b51f33ff8c5e4a15560ff049d200e83726c5"
|
||||
},
|
||||
{
|
||||
"ImportPath": "google.golang.org/grpc/peer",
|
||||
"Comment": "v1.3.0",
|
||||
"Rev": "d2e1b51f33ff8c5e4a15560ff049d200e83726c5"
|
||||
},
|
||||
{
|
||||
"ImportPath": "google.golang.org/grpc/stats",
|
||||
"Comment": "v1.3.0",
|
||||
"Rev": "d2e1b51f33ff8c5e4a15560ff049d200e83726c5"
|
||||
},
|
||||
{
|
||||
"ImportPath": "google.golang.org/grpc/status",
|
||||
"Comment": "v1.3.0",
|
||||
"Rev": "d2e1b51f33ff8c5e4a15560ff049d200e83726c5"
|
||||
},
|
||||
{
|
||||
"ImportPath": "google.golang.org/grpc/tap",
|
||||
"Comment": "v1.3.0",
|
||||
"Rev": "d2e1b51f33ff8c5e4a15560ff049d200e83726c5"
|
||||
},
|
||||
{
|
||||
"ImportPath": "google.golang.org/grpc/transport",
|
||||
"Comment": "v1.3.0",
|
||||
"Rev": "d2e1b51f33ff8c5e4a15560ff049d200e83726c5"
|
||||
},
|
||||
{
|
||||
"ImportPath": "k8s.io/kubernetes/pkg/kubelet/apis/cri/v1alpha1/runtime",
|
||||
"Comment": "v1.8.0-alpha.0-743-gd779e9c",
|
||||
"Rev": "d779e9c9561b732adf06263c5424889e7564fdbd"
|
||||
},
|
||||
{
|
||||
"ImportPath": "k8s.io/kubernetes/pkg/util/interrupt",
|
||||
"Comment": "v1.8.0-alpha.0-743-gd779e9c",
|
||||
"Rev": "d779e9c9561b732adf06263c5424889e7564fdbd"
|
||||
}
|
||||
]
|
||||
}
|
Godeps/Readme (generated, 5 lines deleted)
@@ -1,5 +0,0 @@
This directory tree is generated automatically by godep.

Please do not edit.

See https://github.com/tools/godep for more information.
@@ -77,6 +77,7 @@ CONTAINER_RUNTIME=remote CONTAINER_RUNTIME_ENDPOINT='/var/run/cri-containerd.soc
See [here](./docs) for additional documentation.
## Contributing
Interested in contributing? Check out the [documentation](./CONTRIBUTING.md).

## Kubernetes Incubator
This is a [Kubernetes Incubator project](https://github.com/kubernetes/community/blob/master/incubator.md).
The project was established 2017/4/13. The incubator team for the project is:
vendor.conf (new file, 36 lines added)
@@ -0,0 +1,36 @@
github.com/Microsoft/go-winio v0.4.4
github.com/blang/semver v3.1.0
github.com/boltdb/bolt v1.3.0-58-ge9cf4fa
github.com/containerd/containerd 2386062ce152d6f158d22be5991fe11c7cf67535
github.com/containerd/continuity 86cec1535a968310e7532819f699ff2830ed7463
github.com/containerd/fifo fbfb6a11ec671efbe94ad1c12c2e98773f19e1e6
github.com/containernetworking/cni v0.4.0
github.com/davecgh/go-spew v1.1.0
github.com/docker/distribution b38e5838b7b2f2ad48e06ec4b500011976080621
github.com/docker/docker v1.13.1
github.com/docker/go-events 9461782956ad83b30282bf90e31fa6a70c255ba9
github.com/fsnotify/fsnotify 7d7316ed6e1ed2de075aab8dfc76de5d158d66e1
github.com/gogo/protobuf d2e1ade2d719b78fe5b061b4c18a9f7111b5bdc8
github.com/golang/glog 44145f04b68cf362d9c4df2182967c2275eaefed
github.com/golang/mock bd3c8e81be01eef76d4b503f5e687d2d1354d2d9
github.com/golang/protobuf 5a0f697c9ed9d68fef0116532c6e05cfeae00e55
github.com/jpillora/backoff 06c7a16c845dc8e0bf575fafeeca0f5462f5eb4d
github.com/kubernetes-incubator/cri-o v0.3
github.com/opencontainers/go-digest 21dfd564fd89c944783d00d069f33e3e7123c448
github.com/opencontainers/image-spec 372ad780f63454fbbbbcc7cf80e5b90245c13e13
github.com/opencontainers/runc e775f0fba3ea329b8b766451c892c41a3d49594d
github.com/opencontainers/runtime-spec v1.0.0
github.com/opencontainers/runtime-tools e29f3ca4eb806a582ee1a1864c7b0563bd64c19b
github.com/pkg/errors v0.8.0
github.com/pmezard/go-difflib v1.0.0
github.com/sirupsen/logrus v1.0.0
github.com/spf13/pflag 9ff6c6923cfffbcd502984b8e0c80539a94968b7
github.com/stretchr/testify v1.1.4
github.com/syndtr/gocapability e7cb7fa329f456b3855136a2642b197bad7366ba
golang.org/x/net 7dcfb8076726a3fdd9353b6b8a1f1b6be6811bd6
golang.org/x/sync 450f422ab23cf9881c94e2db30cac0eb1b7cf80c
golang.org/x/sys 739734461d1c916b6c72a63d7efda2b27edb369f
golang.org/x/text 19e51611da83d6be54ddafce4a4af510cb3e9ea4
google.golang.org/genproto d80a6e20e776b0b17a324d0ba1ab50a39c8e8944
google.golang.org/grpc v1.3.0
k8s.io/kubernetes d779e9c9561b732adf06263c5424889e7564fdbd
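As a side-by-side illustration of the migration, each multi-line `Godeps.json` record removed above collapses into a single `vendor.conf` line of the form `import-path revision` (vndr also accepts an optional third column giving an alternative repository URL, though none of the entries above use it). The snippet below uses the `github.com/pkg/errors` entry that appears in both files; the commands are only a hypothetical spot-check, not part of the patch.

```sh
# Old Godeps/Godeps.json record (removed in this commit):
#   { "ImportPath": "github.com/pkg/errors",
#     "Comment": "v0.8.0",
#     "Rev": "645ef00459ed84a119197bfb8d8205042c6df63d" }
# New vendor.conf line (added in this commit):
#   github.com/pkg/errors v0.8.0
grep '^github.com/pkg/errors' vendor.conf   # confirm the pin survived the migration
vndr github.com/pkg/errors                  # re-vendor just this one package, if needed
```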
vendor/github.com/Microsoft/go-winio/.gitignore (generated, vendored, 1 line deleted)
@@ -1 +0,0 @@
*.exe
vendor/github.com/boltdb/bolt/.gitignore (generated, vendored, 4 lines deleted)
@@ -1,4 +0,0 @@
*.prof
*.test
*.swp
/bin/
vendor/github.com/boltdb/bolt/Makefile (generated, vendored, 18 lines deleted)
@@ -1,18 +0,0 @@
BRANCH=`git rev-parse --abbrev-ref HEAD`
COMMIT=`git rev-parse --short HEAD`
GOLDFLAGS="-X main.branch $(BRANCH) -X main.commit $(COMMIT)"

default: build

race:
	@go test -v -race -test.run="TestSimulate_(100op|1000op)"

# go get github.com/kisielk/errcheck
errcheck:
	@errcheck -ignorepkg=bytes -ignore=os:Remove github.com/boltdb/bolt

test:
	@go test -v -cover .
	@go test -v ./cmd/bolt

.PHONY: fmt test
vendor/github.com/boltdb/bolt/appveyor.yml (generated, vendored, 18 lines deleted)
@@ -1,18 +0,0 @@
version: "{build}"

os: Windows Server 2012 R2

clone_folder: c:\gopath\src\github.com\boltdb\bolt

environment:
  GOPATH: c:\gopath

install:
  - echo %PATH%
  - echo %GOPATH%
  - go version
  - go env
  - go get -v -t ./...

build_script:
  - go test -v ./...
vendor/github.com/containerd/containerd/.appveyor.yml (generated, vendored, 37 lines deleted)
@@ -1,37 +0,0 @@
|
||||
version: "{build}"
|
||||
|
||||
image: Visual Studio 2017
|
||||
|
||||
clone_folder: c:\gopath\src\github.com\containerd\containerd
|
||||
|
||||
branches:
|
||||
only:
|
||||
- master
|
||||
|
||||
environment:
|
||||
GOPATH: C:\gopath
|
||||
CGO_ENABLED: 1
|
||||
|
||||
before_build:
|
||||
- choco install -y mingw
|
||||
# TODO: re-enable once the content unit-test have been updated to pass on windows
|
||||
#- choco install codecov
|
||||
|
||||
build_script:
|
||||
- bash.exe -lc "export PATH=/c/tools/mingw64/bin:/c/gopath/src/github.com/containerd/containerd/bin:$PATH ; mingw32-make.exe fmt"
|
||||
- bash.exe -lc "export PATH=/c/tools/mingw64/bin:/c/gopath/src/github.com/containerd/containerd/bin:$PATH ; mingw32-make.exe vet"
|
||||
- bash.exe -lc "export PATH=/c/tools/mingw64/bin:$PATH ; mingw32-make.exe build"
|
||||
- bash.exe -lc "export PATH=/c/tools/mingw64/bin:$PATH ; mingw32-make.exe binaries"
|
||||
|
||||
test_script:
|
||||
# TODO: need an equivalent of TRAVIS_COMMIT_RANGE
|
||||
# - GIT_CHECK_EXCLUDE="./vendor" TRAVIS_COMMIT_RANGE="${TRAVIS_COMMIT_RANGE/.../..}" C:\MinGW\bin\mingw32-make.exe dco
|
||||
- bash.exe -lc "export PATH=/c/tools/mingw64/bin:/c/gopath/src/github.com/containerd/containerd/bin:$PATH ; mingw32-make.exe integration"
|
||||
# TODO: re-enable once the content unit-test have been updated to pass on windows
|
||||
#- bash.exe -lc "export PATH=/c/tools/mingw64/bin:/c/gopath/src/github.com/containerd/containerd/bin:$PATH ; mingw32-make.exe coverage"
|
||||
#- bash.exe -lc "export PATH=/c/tools/mingw64/bin:/c/gopath/src/github.com/containerd/containerd/bin:$PATH ; mingw32-make.exe root-coverage"
|
||||
|
||||
on_success:
|
||||
# Note that, a Codecov upload token is not required.
|
||||
# TODO: re-enable once the content unit-test have been updated to pass on windows
|
||||
#- codecov -f coverage.txt
|
vendor/github.com/containerd/containerd/.gitignore (generated, vendored, 4 lines deleted)
@@ -1,4 +0,0 @@
/bin/
**/coverage.txt
**/coverage.out
containerd.test
vendor/github.com/containerd/containerd/.travis.yml (generated, vendored, 67 lines deleted)
@@ -1,67 +0,0 @@
|
||||
dist: trusty
|
||||
sudo: required
|
||||
# setup travis so that we can run containers for integration tests
|
||||
services:
|
||||
- docker
|
||||
|
||||
language: go
|
||||
|
||||
go:
|
||||
- 1.8.x
|
||||
|
||||
go_import_path: github.com/containerd/containerd
|
||||
|
||||
addons:
|
||||
apt:
|
||||
packages:
|
||||
- btrfs-tools
|
||||
- libseccomp-dev
|
||||
- libapparmor-dev
|
||||
- libnl-3-dev
|
||||
- libnet-dev
|
||||
- protobuf-c-compiler
|
||||
- protobuf-compiler
|
||||
- python-minimal
|
||||
- libcap-dev
|
||||
- libaio-dev
|
||||
- libprotobuf-c0-dev
|
||||
- libprotobuf-dev
|
||||
|
||||
env:
|
||||
- TRAVIS_GOOS=windows TRAVIS_CGO_ENABLED=1
|
||||
- TRAVIS_GOOS=linux TRAVIS_CGO_ENABLED=1
|
||||
- TRAVIS_GOOS=darwin TRAVIS_CGO_ENABLED=0
|
||||
|
||||
before_install:
|
||||
- uname -r
|
||||
|
||||
install:
|
||||
- if [ "$TRAVIS_GOOS" = "windows" ] ; then sudo apt-get install -y gcc-multilib gcc-mingw-w64; export CC=x86_64-w64-mingw32-gcc ; export CXX=x86_64-w64-mingw32-g++ ; fi
|
||||
- wget https://github.com/google/protobuf/releases/download/v3.1.0/protoc-3.1.0-linux-x86_64.zip -O /tmp/protoc-3.1.0-linux-x86_64.zip
|
||||
- unzip -o -d /tmp/protobuf /tmp/protoc-3.1.0-linux-x86_64.zip
|
||||
- export PATH=$PATH:/tmp/protobuf/bin/
|
||||
- go get -u github.com/vbatts/git-validation
|
||||
- sudo wget https://github.com/crosbymichael/runc/releases/download/ctd-4/runc -O /bin/runc; sudo chmod +x /bin/runc
|
||||
- wget https://github.com/xemul/criu/archive/v3.0.tar.gz -O /tmp/criu.tar.gz
|
||||
- tar -C /tmp/ -zxf /tmp/criu.tar.gz
|
||||
- cd /tmp/criu-3.0 && sudo make install-criu
|
||||
- cd $TRAVIS_BUILD_DIR
|
||||
|
||||
script:
|
||||
- export GOOS=$TRAVIS_GOOS
|
||||
- export CGO_ENABLED=$TRAVIS_CGO_ENABLED
|
||||
- GIT_CHECK_EXCLUDE="./vendor" TRAVIS_COMMIT_RANGE="${TRAVIS_COMMIT_RANGE/.../..}" make dco
|
||||
- make fmt
|
||||
# FIXME: For non-linux GOOS, without running `go build -i`, vet fails with `vet: import failed: can't find import: fmt`...
|
||||
# Note that `go build -i` requires write permission to GOROOT. (So it is not called in Makefile)
|
||||
- go build -i .
|
||||
- make vet
|
||||
- make build
|
||||
- make binaries
|
||||
- if [ "$GOOS" = "linux" ]; then sudo make install ; fi
|
||||
- if [ "$GOOS" = "linux" ]; then make coverage ; fi
|
||||
- if [ "$GOOS" = "linux" ]; then sudo PATH=$PATH GOPATH=$GOPATH make root-coverage ; fi
|
||||
- if [ "$GOOS" = "linux" ]; then sudo PATH=$PATH GOPATH=$GOPATH make integration ; fi
|
||||
|
||||
after_success:
|
||||
- bash <(curl -s https://codecov.io/bash)
|
vendor/github.com/containerd/containerd/BUILDING.md (generated, vendored, 73 lines deleted)
@@ -1,73 +0,0 @@
|
||||
# Build containerd from source
|
||||
|
||||
This guide is useful if you intend to contribute on containerd. Thanks for your
|
||||
effort. Every contribution is very appreciated.
|
||||
|
||||
## Build the development environment
|
||||
|
||||
In first you need to setup your Go development environment. You can follow this
|
||||
guideline [How to write go code](https://golang.org/doc/code.html) and at the
|
||||
end you need to have `GOPATH` and `GOROOT` set in your environment.
|
||||
|
||||
Current containerd requires Go 1.8.x or above.
|
||||
|
||||
At this point you can use `go` to checkout `containerd` in your `GOPATH`:
|
||||
|
||||
```sh
|
||||
go get github.com/containerd/containerd
|
||||
```
|
||||
|
||||
`containerd` uses [Btrfs](https://en.wikipedia.org/wiki/Btrfs) it means that you
|
||||
need to satisfy this dependencies in your system:
|
||||
|
||||
* CentOS/Fedora: `yum install btrfs-progs-devel`
|
||||
* Debian/Ubuntu: `apt-get install btrfs-tools`
|
||||
|
||||
At this point you are ready to build `containerd` yourself.
|
||||
|
||||
## In your local environment
|
||||
|
||||
`containerd` uses `make` to create a repeatable build flow. It means that you
|
||||
can run:
|
||||
|
||||
```sudo
|
||||
make
|
||||
```
|
||||
|
||||
This is going to build all the binaries provided by this project in the `./bin`
|
||||
directory.
|
||||
|
||||
You can move them in your global path with:
|
||||
|
||||
```sudo
|
||||
sudo make install
|
||||
```
|
||||
|
||||
## Via Docker Container
|
||||
|
||||
You can build `containerd` via Docker container. You can build an image from
|
||||
this `Dockerfile`:
|
||||
|
||||
```
|
||||
FROM golang
|
||||
|
||||
RUN apt-get update && \
|
||||
apt-get install btrfs-tools
|
||||
```
|
||||
|
||||
Let's suppose that you built an image called `containerd/build` and you are into
|
||||
the containerd root directory you can run the following command:
|
||||
|
||||
```sh
|
||||
docker run -it --rm \
|
||||
-v ${PWD}:/go/src/github.com/containerd/containerd \
|
||||
-e GOPATH=/go \
|
||||
-w /go/src/github.com/containerd/containerd containerd/build make
|
||||
```
|
||||
|
||||
At this point you can find your binaries in the `./bin` directory in your host.
|
||||
You can move the binaries in your `$PATH` with the command:
|
||||
|
||||
```sh
|
||||
sudo make install
|
||||
```
|
vendor/github.com/containerd/containerd/CONTRIBUTING.md (generated, vendored, 115 lines deleted)
@@ -1,115 +0,0 @@
|
||||
# Contributing
|
||||
|
||||
Contributions should be made via pull requests. Pull requests will be reviewed
|
||||
by one or more maintainers and merged when acceptable.
|
||||
|
||||
This project is in an early state, making the impact of contributions much
|
||||
greater than at other stages. In this respect, it is important to consider any
|
||||
changes or additions for their future impact more so than their current impact.
|
||||
|
||||
## Successful Changes
|
||||
|
||||
We ask that before contributing, please make the effort to coordinate with the
|
||||
maintainers of the project before submitting large or high impact PRs. This
|
||||
will prevent you from doing extra work that may or may not be merged.
|
||||
|
||||
PRs that are just submitted without any prior communication will likely be
|
||||
summarily closed.
|
||||
|
||||
While pull requests are the methodology for submitting changes to code, changes
|
||||
are much more likely to be accepted if they are accompanied by additional
|
||||
engineering work. While we don't define this explicitly, most of these goals
|
||||
are accomplished through communication of the design goals and subsequent
|
||||
solutions. Often times, it helps to first state the problem before presenting
|
||||
solutions.
|
||||
|
||||
Typically, the best methods of accomplishing this are to submit an issue,
|
||||
stating the problem. This issue can include a problem statement and a
|
||||
checklist with requirements. If solutions are proposed, alternatives should be
|
||||
listed and eliminated. Even if the criteria for elimination of a solution is
|
||||
frivolous, say so.
|
||||
|
||||
Larger changes typically work best with design documents, similar to those found
|
||||
in `design/`. These are focused on providing context to the design at the time
|
||||
the feature was conceived and can inform future documentation contributions.
|
||||
|
||||
## Commit Messages
|
||||
|
||||
There are times for one line commit messages and this is not one of them.
|
||||
Commit messages should follow best practices, including explaining the context
|
||||
of the problem and how it was solved, including in caveats or follow up changes
|
||||
required. They should tell the story of the change and provide readers
|
||||
understanding of what led to it.
|
||||
|
||||
If you're lost about what this even means, please see [How to Write a Git
|
||||
Commit Message](http://chris.beams.io/posts/git-commit/) for a start.
|
||||
|
||||
In practice, the best approach to maintaining a nice commit message is to
|
||||
leverage a `git add -p` and `git commit --amend` to formulate a solid
|
||||
changeset. This allows one to piece together a change, as information becomes
|
||||
available.
|
||||
|
||||
If you squash a series of commits, don't just submit that. Re-write the commit
|
||||
message, as if the series of commits was a single stroke of brilliance.
|
||||
|
||||
That said, there is no requirement to have a single commit for a PR, as long as
|
||||
each commit tells the story. For example, if there is a feature that requires a
|
||||
package, it might make sense to have the package in a separate commit then have
|
||||
a subsequent commit that uses it.
|
||||
|
||||
Remember, you're telling part of the story with the commit message. Don't make
|
||||
your chapter weird.
|
||||
|
||||
## Sign your work
|
||||
|
||||
The sign-off is a simple line at the end of the explanation for the patch. Your
|
||||
signature certifies that you wrote the patch or otherwise have the right to pass
|
||||
it on as an open-source patch. The rules are pretty simple: if you can certify
|
||||
the below (from [developercertificate.org](http://developercertificate.org/)):
|
||||
|
||||
```
|
||||
Developer Certificate of Origin
|
||||
Version 1.1
|
||||
|
||||
Copyright (C) 2004, 2006 The Linux Foundation and its contributors.
|
||||
660 York Street, Suite 102,
|
||||
San Francisco, CA 94110 USA
|
||||
|
||||
Everyone is permitted to copy and distribute verbatim copies of this
|
||||
license document, but changing it is not allowed.
|
||||
|
||||
Developer's Certificate of Origin 1.1
|
||||
|
||||
By making a contribution to this project, I certify that:
|
||||
|
||||
(a) The contribution was created in whole or in part by me and I
|
||||
have the right to submit it under the open source license
|
||||
indicated in the file; or
|
||||
|
||||
(b) The contribution is based upon previous work that, to the best
|
||||
of my knowledge, is covered under an appropriate open source
|
||||
license and I have the right under that license to submit that
|
||||
work with modifications, whether created in whole or in part
|
||||
by me, under the same open source license (unless I am
|
||||
permitted to submit under a different license), as indicated
|
||||
in the file; or
|
||||
|
||||
(c) The contribution was provided directly to me by some other
|
||||
person who certified (a), (b) or (c) and I have not modified
|
||||
it.
|
||||
|
||||
(d) I understand and agree that this project and the contribution
|
||||
are public and that a record of the contribution (including all
|
||||
personal information I submit with it, including my sign-off) is
|
||||
maintained indefinitely and may be redistributed consistent with
|
||||
this project or the open source license(s) involved.
|
||||
```
|
||||
|
||||
Then you just add a line to every git commit message:
|
||||
|
||||
Signed-off-by: Joe Smith <joe.smith@email.com>
|
||||
|
||||
Use your real name (sorry, no pseudonyms or anonymous contributions.)
|
||||
|
||||
If you set your `user.name` and `user.email` git configs, you can sign your
|
||||
commit automatically with `git commit -s`.
|
vendor/github.com/containerd/containerd/MAINTAINERS (generated, vendored, 300 lines deleted)
@@ -1,300 +0,0 @@
|
||||
# containerd project maintainers file
|
||||
#
|
||||
# This file describes who runs the containerd project and how.
|
||||
# This is a living document - if you see something out of date or missing,
|
||||
# speak up!
|
||||
#
|
||||
# It is structured to be consumable by both humans and programs.
|
||||
# To extract its contents programmatically, use any TOML-compliant
|
||||
# parser.
|
||||
|
||||
[Rules]
|
||||
|
||||
[Rules.maintainers]
|
||||
|
||||
title = "What is a maintainer?"
|
||||
|
||||
text = """
|
||||
There are different types of maintainers, with different responsibilities, but
|
||||
all maintainers have 3 things in common:
|
||||
|
||||
1) They share responsibility in the project's success.
|
||||
2) They have made a long-term, recurring time investment to improve the project.
|
||||
3) They spend that time doing whatever needs to be done, not necessarily what
|
||||
is the most interesting or fun.
|
||||
|
||||
Maintainers are often under-appreciated, because their work is harder to appreciate.
|
||||
It's easy to appreciate a really cool and technically advanced feature. It's harder
|
||||
to appreciate the absence of bugs, the slow but steady improvement in stability,
|
||||
or the reliability of a release process. But those things distinguish a good
|
||||
project from a great one.
|
||||
"""
|
||||
|
||||
[Rules.adding-maintainers]
|
||||
|
||||
title = "How are maintainers added?"
|
||||
|
||||
text = """
|
||||
Maintainers are first and foremost contributors that have shown they are
|
||||
committed to the long term success of a project. Contributors wanting to become
|
||||
maintainers are expected to be deeply involved in contributing code, pull
|
||||
request review, and triage of issues in the project for more than three months.
|
||||
|
||||
Just contributing does not make you a maintainer, it is about building trust
|
||||
with the current maintainers of the project and being a person that they can
|
||||
depend on and trust to make decisions in the best interest of the project.
|
||||
|
||||
Periodically, the existing maintainers curate a list of contributors that have
|
||||
shown regular activity on the project over the prior months. From this list,
|
||||
maintainer candidates are selected and proposed on the maintainers mailing list.
|
||||
|
||||
After a candidate has been announced on the maintainers mailing list, the
|
||||
existing maintainers are given five business days to discuss the candidate,
|
||||
raise objections and cast their vote. Candidates must be approved by the BDFL
|
||||
and at least 66% of the current maintainers by adding their vote on the mailing
|
||||
list. Only maintainers of the repository that the candidate is proposed for are
|
||||
allowed to vote. The BDFL's vote is mandatory.
|
||||
|
||||
If a candidate is approved, a maintainer will contact the candidate to invite
|
||||
the candidate to open a pull request that adds the contributor to the
|
||||
MAINTAINERS file. The candidate becomes a maintainer once the pull request is
|
||||
merged.
|
||||
"""
|
||||
|
||||
[Rules.adding-sub-projects]
|
||||
|
||||
title = "How are sub projects added?"
|
||||
|
||||
text = """
|
||||
Similar to adding maintainers, new sub projects can be added to containerd
|
||||
GitHub organization as long as they adhere to the CNCF
|
||||
[charter](https://www.cncf.io/about/charter/) and mission. After a project
|
||||
proposal has been announced on a public forum (GitHub issue or mailing list),
|
||||
the existing maintainers are given five business days to discuss the new
|
||||
project, raise objections and cast their vote. Projects must be approved by at
|
||||
least 66% of the current maintainers by adding their vote.
|
||||
|
||||
If a project is approved, a maintainer will add the project to the containerd
|
||||
GitHub organization, and make an announcement on a public forum.
|
||||
"""
|
||||
|
||||
[Rules.stepping-down-policy]
|
||||
|
||||
title = "Stepping down policy"
|
||||
|
||||
text = """
|
||||
Life priorities, interests, and passions can change. If you're a maintainer but
|
||||
feel you must remove yourself from the list, inform other maintainers that you
|
||||
intend to step down, and if possible, help find someone to pick up your work.
|
||||
At the very least, ensure your work can be continued where you left off.
|
||||
|
||||
After you've informed other maintainers, create a pull request to remove
|
||||
yourself from the MAINTAINERS file.
|
||||
"""
|
||||
|
||||
[Rules.inactive-maintainers]
|
||||
|
||||
title = "Removal of inactive maintainers"
|
||||
|
||||
text = """
|
||||
Similar to the procedure for adding new maintainers, existing maintainers can
|
||||
be removed from the list if they do not show significant activity on the
|
||||
project. Periodically, the maintainers review the list of maintainers and their
|
||||
activity over the last three months.
|
||||
|
||||
If a maintainer has shown insufficient activity over this period, a neutral
|
||||
person will contact the maintainer to ask if they want to continue being
|
||||
a maintainer. If the maintainer decides to step down as a maintainer, they
|
||||
open a pull request to be removed from the MAINTAINERS file.
|
||||
|
||||
If the maintainer wants to remain a maintainer, but is unable to perform the
|
||||
required duties they can be removed with a vote by the BDFL and at least 66% of
|
||||
the current maintainers. The BDFL's vote is mandatory. An e-mail is sent to the
|
||||
mailing list, inviting maintainers of the project to vote. The voting period is
|
||||
five business days. Issues related to a maintainer's performance should be
|
||||
discussed with them among the other maintainers so that they are not surprised
|
||||
by a pull request removing them.
|
||||
"""
|
||||
|
||||
[Rules.bdfl]
|
||||
|
||||
title = "The Benevolent dictator for life (BDFL)"
|
||||
|
||||
text = """
|
||||
containerd follows the timeless, highly efficient and totally unfair system
|
||||
known as [Benevolent dictator for
|
||||
life](https://en.wikipedia.org/wiki/Benevolent_Dictator_for_Life), with yours
|
||||
truly, Solomon Hykes, in the role of BDFL. This means that all decisions are
|
||||
made, by default, by Solomon. Since making every decision himself would be
|
||||
highly un-scalable, in practice decisions are spread across multiple
|
||||
maintainers.
|
||||
|
||||
Ideally, the BDFL role is like the Queen of England: awesome crown, but not an
|
||||
actual operational role day-to-day. The real job of a BDFL is to NEVER GO AWAY.
|
||||
Every other rule can change, perhaps drastically so, but the BDFL will always be
|
||||
there, preserving the philosophy and principles of the project, and keeping
|
||||
ultimate authority over its fate. This gives us great flexibility in
|
||||
experimenting with various governance models, knowing that we can always press
|
||||
the "reset" button without fear of fragmentation or deadlock. See the US
|
||||
congress for a counter-example.
|
||||
|
||||
BDFL daily routine:
|
||||
|
||||
* Is the project governance stuck in a deadlock or irreversibly fragmented?
|
||||
* If yes: refactor the project governance
|
||||
* Are there issues or conflicts escalated by maintainers?
|
||||
* If yes: resolve them
|
||||
* Go back to polishing that crown.
|
||||
"""
|
||||
|
||||
[Rules.decisions]
|
||||
|
||||
title = "How are decisions made?"
|
||||
|
||||
text = """
|
||||
Short answer: EVERYTHING IS A PULL REQUEST.
|
||||
|
||||
containerd is an open-source project with an open design philosophy. This means
|
||||
that the repository is the source of truth for EVERY aspect of the project,
|
||||
including its philosophy, design, road map, and APIs. *If it's part of the
|
||||
project, it's in the repo. If it's in the repo, it's part of the project.*
|
||||
|
||||
As a result, all decisions can be expressed as changes to the repository. An
|
||||
implementation change is a change to the source code. An API change is a change
|
||||
to the API specification. A philosophy change is a change to the philosophy
|
||||
manifesto, and so on.
|
||||
|
||||
All decisions affecting containerd, big and small, follow the same 3 steps:
|
||||
|
||||
* Step 1: Open a pull request. Anyone can do this.
|
||||
|
||||
* Step 2: Discuss the pull request. Anyone can do this.
|
||||
|
||||
* Step 3: Merge or refuse the pull request. Who does this depends on the nature
|
||||
of the pull request and which areas of the project it affects.
|
||||
"""
|
||||
|
||||
[Rules.DCO]
|
||||
|
||||
title = "Helping contributors with the DCO"
|
||||
|
||||
text = """
|
||||
The [DCO or `Sign your work`](
|
||||
https://github.com/containerd/containerd/blob/master/CONTRIBUTING.md#sign-your-work)
|
||||
requirement is not intended as a roadblock or speed bump.
|
||||
|
||||
Some containerd contributors are not as familiar with `git`, or have used a web
|
||||
based editor, and thus asking them to `git commit --amend -s` is not the best
|
||||
way forward.
|
||||
|
||||
In this case, maintainers can update the commits based on clause (c) of the DCO.
|
||||
The most trivial way for a contributor to allow the maintainer to do this, is to
|
||||
add a DCO signature in a pull requests's comment, or a maintainer can simply
|
||||
note that the change is sufficiently trivial that it does not substantially
|
||||
change the existing contribution - i.e., a spelling change.
|
||||
|
||||
When you add someone's DCO, please also add your own to keep a log.
|
||||
"""
|
||||
|
||||
[Rules."no direct push"]
|
||||
|
||||
title = "I'm a maintainer. Should I make pull requests too?"
|
||||
|
||||
text = """
|
||||
Yes. Nobody should ever push to master directly. All changes should be
|
||||
made through a pull request.
|
||||
"""
|
||||
|
||||
[Rules.meta]
|
||||
|
||||
title = "How is this process changed?"
|
||||
|
||||
text = "Just like everything else: by making a pull request :)"
|
||||
|
||||
# Current project roles
|
||||
[Roles]
|
||||
|
||||
[Roles.bdfl]
|
||||
|
||||
person = "shykes"
|
||||
|
||||
[Roles."Chief Architect"]
|
||||
|
||||
person = "crosbymichael"
|
||||
|
||||
text = """
|
||||
The chief architect is responsible for the overall integrity of the technical
|
||||
architecture across all subsystems, and the consistency of APIs and UI.
|
||||
|
||||
Changes to UI, public APIs and overall architecture must be approved by the
|
||||
chief architect.
|
||||
"""
|
||||
|
||||
[Roles."Chief Maintainer"]
|
||||
|
||||
person = "crosbymichael"
|
||||
|
||||
text = """
|
||||
The chief maintainer is responsible for all aspects of quality for the project
|
||||
including code reviews, usability, stability, security, performance, etc. The
|
||||
most important function of the chief maintainer is to lead by example. On the
|
||||
first day of a new maintainer, the best advice should be "follow the C.M.'s
|
||||
example and you'll be fine".
|
||||
"""
|
||||
|
||||
# Current project organization
|
||||
[Org]
|
||||
|
||||
[Org.Maintainers]
|
||||
people = [
|
||||
"crosbymichael",
|
||||
"dqminh",
|
||||
"dmcgowan",
|
||||
"estesp",
|
||||
"hqhq",
|
||||
"jhowardmsft",
|
||||
"mlaventure",
|
||||
"stevvooe",
|
||||
]
|
||||
|
||||
[People]
|
||||
|
||||
[People.crosbymichael]
|
||||
Name = "Michael Crosby"
|
||||
Email = "crosbymichael@gmail.com"
|
||||
GitHub = "crosbymichael"
|
||||
|
||||
[People.dqminh]
|
||||
Name = "Daniel, Dao Quang Minh"
|
||||
Email = "dqminh89@gmail.com"
|
||||
GitHub = "dqminh"
|
||||
|
||||
[people.dmcgowan]
|
||||
Name = "Derek McGowan"
|
||||
Email = "derek@mcgstyle.net"
|
||||
GitHub = "dmcgowan"
|
||||
|
||||
[People.estesp]
|
||||
Name = "Phil Estes"
|
||||
Email = "estesp@gmail.com"
|
||||
GitHub = "estesp"
|
||||
|
||||
[People.hqhq]
|
||||
Name = "Qiang Huang"
|
||||
Email = "h.huangqiang@huawei.com"
|
||||
GitHub = "hqhq"
|
||||
|
||||
[People.jhowardmsft]
|
||||
Name = "John Howard"
|
||||
Email = "jhoward@microsoft.com"
|
||||
GitHub = "jhowardmsft"
|
||||
|
||||
[People.mlaventure]
|
||||
Name = "Kenfe-Mickaël Laventure"
|
||||
Email = "mickael.laventure@docker.com"
|
||||
GitHub = "mlaventure"
|
||||
|
||||
[People.stevvooe]
|
||||
Name = "Stephen Day"
|
||||
Email = "stephen.day@docker.com"
|
||||
GitHub = "stevvooe"
|
vendor/github.com/containerd/containerd/Makefile (generated, vendored, 203 lines deleted)
@@ -1,203 +0,0 @@
|
||||
# Root directory of the project (absolute path).
|
||||
ROOTDIR=$(dir $(abspath $(lastword $(MAKEFILE_LIST))))
|
||||
|
||||
# Base path used to install.
|
||||
DESTDIR=/usr/local
|
||||
|
||||
# Used to populate variables in version package.
|
||||
VERSION=$(shell git describe --match 'v[0-9]*' --dirty='.m' --always)
|
||||
REVISION=$(shell git rev-parse HEAD)$(shell if ! git diff --no-ext-diff --quiet --exit-code; then echo .m; fi)
|
||||
|
||||
ifneq "$(strip $(shell command -v go 2>/dev/null))" ""
|
||||
GOOS ?= $(shell go env GOOS)
|
||||
else
|
||||
GOOS ?= $$GOOS
|
||||
endif
|
||||
|
||||
WHALE = "🇩"
|
||||
ONI = "👹"
|
||||
FIX_PATH = $1
|
||||
ifeq ("$(OS)", "Windows_NT")
|
||||
WHALE="+"
|
||||
ONI="-"
|
||||
FIX_PATH = $(subst /,\,$1)
|
||||
endif
|
||||
GOARCH ?= $(shell go env GOARCH)
|
||||
|
||||
RELEASE=containerd-$(VERSION:v%=%).${GOOS}-${GOARCH}
|
||||
|
||||
PKG=github.com/containerd/containerd
|
||||
|
||||
# Project packages.
|
||||
PACKAGES=$(shell go list ./... | grep -v /vendor/)
|
||||
INTEGRATION_PACKAGE=${PKG}
|
||||
TEST_REQUIRES_ROOT_PACKAGES=$(shell for f in $$(git grep -l testutil.RequiresRoot | grep -v Makefile);do echo "${PKG}/$$(dirname $$f)"; done)
|
||||
|
||||
# Project binaries.
|
||||
COMMANDS=ctr containerd
|
||||
ifneq ("$(GOOS)", "windows")
|
||||
COMMANDS += containerd-shim
|
||||
endif
|
||||
BINARIES=$(addprefix bin/,$(COMMANDS))
|
||||
ifeq ("$(GOOS)", "windows")
|
||||
BINARY_SUFFIX=".exe"
|
||||
endif
|
||||
|
||||
GO_TAGS=$(if $(BUILDTAGS),-tags "$(BUILDTAGS)",)
|
||||
GO_LDFLAGS=-ldflags "-X $(PKG)/version.Version=$(VERSION) -X $(PKG)/version.Revision=$(REVISION) -X $(PKG)/version.Package=$(PKG) $(EXTRA_LDFLAGS)"
|
||||
|
||||
# Flags passed to `go test`
|
||||
TESTFLAGS ?=-parallel 8 -race -v

.PHONY: clean all AUTHORS fmt vet lint dco build binaries test integration setup generate protos checkprotos coverage ci check help install uninstall vendor release
.DEFAULT: default

all: binaries

check: fmt vet lint ineffassign ## run fmt, vet, lint, ineffassign

ci: check binaries checkprotos coverage coverage-integration ## to be used by the CI

AUTHORS: .mailmap .git/HEAD
	git log --format='%aN <%aE>' | sort -fu > $@

setup: ## install dependencies
	@echo "$(WHALE) $@"
	# TODO(stevvooe): Install these from the vendor directory
	@go get -u github.com/golang/lint/golint
	#@go get -u github.com/kisielk/errcheck
	@go get -u github.com/gordonklaus/ineffassign
	@go get -u github.com/stevvooe/protobuild

generate: protos
	@echo "$(WHALE) $@"
	@PATH=${ROOTDIR}/bin:${PATH} go generate -x ${PACKAGES}

protos: bin/protoc-gen-gogoctrd ## generate protobuf
	@echo "$(WHALE) $@"
	@PATH=${ROOTDIR}/bin:${PATH} protobuild ${PACKAGES}

checkprotos: protos ## check if protobufs need to be generated again
	@echo "$(WHALE) $@"
	@test -z "$$(git status --short | grep ".pb.go" | tee /dev/stderr)" || \
		((git diff | cat) && \
		(echo "$(ONI) please run 'make generate' when making changes to proto files" && false))

# Depends on binaries because vet will silently fail if it can't load compiled
# imports
vet: binaries ## run go vet
	@echo "$(WHALE) $@"
	@test -z "$$(go vet ${PACKAGES} 2>&1 | grep -v 'constant [0-9]* not a string in call to Errorf' | grep -v 'unrecognized printf verb 'r'' | egrep -v '(timestamp_test.go|duration_test.go|fetch.go|exit status 1)' | tee /dev/stderr)"

fmt: ## run go fmt
	@echo "$(WHALE) $@"
	@test -z "$$(gofmt -s -l . | grep -Fv $(call FIX_PATH,'vendor/') | grep -v ".pb.go$$" | tee /dev/stderr)" || \
		(echo "$(ONI) please format Go code with 'gofmt -s -w'" && false)
	@test -z "$$(find . -path ./vendor -prune -o ! -name timestamp.proto ! -name duration.proto -name '*.proto' -type f -exec grep -Hn -e "^ " {} \; | tee /dev/stderr)" || \
		(echo "$(ONI) please indent proto files with tabs only" && false)
	@test -z "$$(find . -path ./vendor -prune -o -name '*.proto' -type f -exec grep -Hn "Meta meta = " {} \; | grep -v '(gogoproto.nullable) = false' | tee /dev/stderr)" || \
		(echo "$(ONI) meta fields in proto files must have option (gogoproto.nullable) = false" && false)

lint: ## run go lint
	@echo "$(WHALE) $@"
	@test -z "$$(golint ./... | grep -Fv $(call FIX_PATH,'vendor/') | grep -v ".pb.go:" | tee /dev/stderr)"

dco: ## dco check
	@which git-validation > /dev/null 2>/dev/null || (echo "ERROR: git-validation not found" && false)
ifdef TRAVIS_COMMIT_RANGE
	git-validation -q -run DCO,short-subject,dangling-whitespace
else
	git-validation -v -run DCO,short-subject,dangling-whitespace -range $(EPOCH_TEST_COMMIT)..HEAD
endif

ineffassign: ## run ineffassign
	@echo "$(WHALE) $@"
	@test -z "$$(ineffassign . | grep -Fv $(call FIX_PATH,'vendor/') | grep -v ".pb.go:" | tee /dev/stderr)"

#errcheck: ## run go errcheck
#	@echo "$(WHALE) $@"
#	@test -z "$$(errcheck ./... | grep -Fv $(call FIX_PATH,'vendor/') | grep -v ".pb.go:" | tee /dev/stderr)"

build: ## build the go packages
	@echo "$(WHALE) $@"
	@go build -v ${EXTRA_FLAGS} ${GO_LDFLAGS} ${GO_GCFLAGS} ${PACKAGES}

test: ## run tests, except integration tests and tests that require root
	@echo "$(WHALE) $@"
	@go test ${TESTFLAGS} $(filter-out ${INTEGRATION_PACKAGE},${PACKAGES})

root-test: ## run tests that require root, except integration tests
	@echo "$(WHALE) $@"
	@go test ${TESTFLAGS} $(filter-out ${INTEGRATION_PACKAGE},${TEST_REQUIRES_ROOT_PACKAGES}) -test.root

integration: ## run integration tests
	@echo "$(WHALE) $@"
	@go test ${TESTFLAGS} -test.root

benchmark: ## run benchmarks
	@echo "$(WHALE) $@"
	@go test ${TESTFLAGS} -bench . -run Benchmark -test.root

FORCE:

# Build a binary from a cmd.
bin/%: cmd/% FORCE
	@echo "$(WHALE) $@${BINARY_SUFFIX}"
	@go build -o $@${BINARY_SUFFIX} ${GO_LDFLAGS} ${GO_TAGS} ${GO_GCFLAGS} ./$<

binaries: $(BINARIES) ## build binaries
	@echo "$(WHALE) $@"

release: $(BINARIES)
	@echo "$(WHALE) $@"
	@mkdir -p releases/${RELEASE}
	@cp $(BINARIES) releases/$(RELEASE)/
	@cd releases/$(RELEASE) && tar -czf ../$(RELEASE).tar.gz *

clean: ## clean up binaries
	@echo "$(WHALE) $@"
	@rm -f $(BINARIES)

install: ## install binaries
	@echo "$(WHALE) $@ $(BINARIES)"
	@mkdir -p $(DESTDIR)/bin
	@install $(BINARIES) $(DESTDIR)/bin

uninstall:
	@echo "$(WHALE) $@"
	@rm -f $(addprefix $(DESTDIR)/bin/,$(notdir $(BINARIES)))


coverage: ## generate coverprofiles from the unit tests, except tests that require root
	@echo "$(WHALE) $@"
	@rm -f coverage.txt
	( for pkg in $(filter-out ${INTEGRATION_PACKAGE},${PACKAGES}); do \
		go test -i ${TESTFLAGS} -test.short -coverprofile=coverage.out -covermode=atomic $$pkg || exit; \
		if [ -f profile.out ]; then \
			cat profile.out >> coverage.txt; \
			rm profile.out; \
		fi; \
		go test ${TESTFLAGS} -test.short -coverprofile=coverage.out -covermode=atomic $$pkg || exit; \
		if [ -f profile.out ]; then \
			cat profile.out >> coverage.txt; \
			rm profile.out; \
		fi; \
	done )

root-coverage: ## generate coverage profiles for the unit tests that require root
	@echo "$(WHALE) $@"
	@( for pkg in $(filter-out ${INTEGRATION_PACKAGE},${TEST_REQUIRES_ROOT_PACKAGES}); do \
		go test -i ${TESTFLAGS} -test.short -coverprofile="../../../$$pkg/coverage.txt" -covermode=atomic $$pkg -test.root || exit; \
		go test ${TESTFLAGS} -test.short -coverprofile="../../../$$pkg/coverage.txt" -covermode=atomic $$pkg -test.root || exit; \
	done )

coverage-integration: ## generate coverprofiles from the integration tests
	@echo "$(WHALE) $@"
	go test ${TESTFLAGS} -test.short -coverprofile="../../../${INTEGRATION_PACKAGE}/coverage.txt" -covermode=atomic ${INTEGRATION_PACKAGE} -test.root

vendor:
	@echo "$(WHALE) $@"
	@vndr

help: ## this help
	@awk 'BEGIN {FS = ":.*?## "} /^[a-zA-Z_-]+:.*?## / {printf "\033[36m%-30s\033[0m %s\n", $$1, $$2}' $(MAKEFILE_LIST) | sort
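The `vendor` target above simply invokes `vndr`, which regenerates the `vendor/` tree from the pins recorded in `vendor.conf`. As an illustrative sketch only (these import paths and revisions are hypothetical, not the project's real pins), each `vendor.conf` entry is one line giving an import path, a revision, and an optional alternate clone URL:

```
# vendor.conf entries (hypothetical examples only)
github.com/example/somelib 0123456789abcdef0123456789abcdef01234567
github.com/example/forkedlib 89abcdef0123456789abcdef0123456789abcdef https://github.com/example-org/forkedlib.git
```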
29
vendor/github.com/containerd/containerd/Protobuild.toml
generated
vendored
@ -1,29 +0,0 @@
version = "unstable"
generator = "gogoctrd"
plugins = ["grpc"]

# Control protoc include paths. Below are usually some good defaults, but feel
# free to try it without them if it works for your project.
[includes]
# Include paths that will be added before all others. Typically, you want to
# treat the root of the project as an include, but this may not be necessary.
before = ["."]

# Paths that should be treated as include roots in relation to the vendor
# directory. These will be calculated with the vendor directory nearest the
# target package.
vendored = ["github.com/gogo/protobuf"]

# Paths that will be added untouched to the end of the includes. We use
# `/usr/local/include` to pickup the common install location of protobuf.
# This is the default.
after = ["/usr/local/include"]

# This section maps protobuf imports to Go packages. These will become
# `-M` directives in the call to the go protobuf generator.
[packages]
"gogoproto/gogo.proto" = "github.com/gogo/protobuf/gogoproto"
"google/protobuf/any.proto" = "github.com/gogo/protobuf/types"
"google/protobuf/descriptor.proto" = "github.com/gogo/protobuf/protoc-gen-gogo/descriptor"
"google/protobuf/field_mask.proto" = "github.com/gogo/protobuf/types"
"google/protobuf/timestamp.proto" = "github.com/gogo/protobuf/types"
3
vendor/github.com/containerd/containerd/ROADMAP.md
generated
vendored
@ -1,3 +0,0 @@
# containerd Roadmap

Please review the milestones on [github](https://github.com/containerd/containerd/milestones) for the updated roadmap and release information.
21
vendor/github.com/containerd/containerd/RUNC.md
generated
vendored
@ -1,21 +0,0 @@
containerd is built with OCI support and with support for advanced features provided by `runc`.

We depend on a specific runc version when dealing with advanced features. You should have a specific build for development. The current supported runc commit is:

RUNC_COMMIT = e775f0fba3ea329b8b766451c892c41a3d49594d

## building

### apparmor

```bash
make BUILDTAGS='seccomp apparmor' && sudo make install
```

### selinux

```bash
make BUILDTAGS='seccomp selinux' && sudo make install
```

After an official runc release we will start pinning containerd support to a specific version but various development and testing features may require a newer runc version than the latest release. If you encounter any runtime errors, please make sure your runc is in sync with the commit/tag provided in this document.
57
vendor/github.com/containerd/containerd/SCOPE.md
generated
vendored
@ -1,57 +0,0 @@
# Scope and Principles

Having a clearly defined scope of a project is important for ensuring consistency and focus.
The following criteria will be used when reviewing pull requests, features, and changes for the project before being accepted.

### Components

Components should not have tight dependencies on each other so that they are able to be used independently.
The APIs for images and containers should be designed in a way that when used together the components have a natural flow but still be useful independently.

An example for this design can be seen with the overlay filesystems and the container execution layer.
The execution layer and overlay filesystems can be used independently but if you were to use both, they share a common `Mount` struct that the filesystems produce and the execution layer consumes.

### Primitives

containerd should expose primitives to solve problems instead of building high level abstractions in the API.
A common example of this is how build would be implemented.
Instead of having a build API in containerd we should expose the lower level primitives that allow things required in build to work.
Breaking up the filesystem APIs to allow snapshots, copy functionality, and mounts allows people to implement build at the higher levels with more flexibility.

### Extensibility and Defaults

For the various components in containerd there should be defined extension points where implementations can be swapped for alternatives.
The best example of this is that containerd will use `runc` from OCI as the default runtime in the execution layer but other runtimes conforming to the OCI Runtime specification can be easily added to containerd.

containerd will come with a default implementation for the various components.
These defaults will be chosen by the maintainers of the project and should not change unless better tech for that component comes out.
Additional implementations will not be accepted into the core repository and should be developed in a separate repository not maintained by the containerd maintainers.

## Scope

The following table specifies the various components of containerd and general features of container runtimes.
The table specifies whether or not the feature/component is in or out of scope.

| Name | Description | In/Out | Reason |
|------|-------------|--------|--------|
| execution | Provide an extensible execution layer for executing a container | in | Create, start, stop, pause, resume, exec, signal, delete |
| cow filesystem | Built in functionality for overlay, aufs, and other copy on write filesystems for containers | in | |
| distribution | Having the ability to push and pull images as well as operations on images as a first class API object | in | containerd will fully support the management and retrieval of images |
| metrics | container-level metrics, cgroup stats, and OOM events | in | |
| networking | creation and management of network interfaces | out | Networking will be handled and provided to containerd via higher level systems. |
| build | Building images as a first class API | out | Build is a higher level tooling feature and can be implemented in many different ways on top of containerd |
| volumes | Volume management for external data | out | The API supports mounts, binds, etc where all volumes type systems can be built on top of containerd. |
| logging | Persisting container logs | out | Logging can be built on top of containerd because the container's STDIO will be provided to the clients and they can persist any way they see fit. There is no io copying of container STDIO in containerd. |

containerd is scoped to a single host and makes assumptions based on that fact.
It can be used to build things like a node agent that launches containers but does not have any concepts of a distributed system.

containerd is designed to be embedded into a larger system, hence it only includes a barebone CLI (`ctr`) specifically for development and debugging purpose, with no mandate to be human-friendly, and no guarantee of interface stability over time.

### How is the scope changed?

The scope of this project is a whitelist.
If it's not mentioned as being in scope, it is out of scope.
For the scope of this project to change it requires a 100% vote from all maintainers of the project.
3
vendor/github.com/containerd/containerd/code-of-conduct.md
generated
vendored
@ -1,3 +0,0 @@
## containerd Community Code of Conduct

containerd follows the [CNCF Code of Conduct](https://github.com/cncf/foundation/blob/master/code-of-conduct.md).
13
vendor/github.com/containerd/containerd/containerd.service
generated
vendored
@ -1,13 +0,0 @@
[Unit]
Description=containerd container runtime
Documentation=https://containerd.io
After=network.target

[Service]
ExecStartPre=/sbin/modprobe overlay
ExecStart=/usr/local/bin/containerd
Delegate=yes
KillMode=process

[Install]
WantedBy=multi-user.target
8
vendor/github.com/containerd/continuity/AUTHORS
generated
vendored
@ -1,8 +0,0 @@
Aaron Lehmann <aaron.lehmann@docker.com>
Akihiro Suda <suda.akihiro@lab.ntt.co.jp>
Brandon Philips <brandon.philips@coreos.com>
Derek McGowan <derek@mcgstyle.net>
Justin Cormack <justin.cormack@docker.com>
Justin Cummins <sul3n3t@gmail.com>
Stephen J Day <stephen.day@docker.com>
Tonis Tiigi <tonistiigi@gmail.com>
19
vendor/github.com/containerd/continuity/README.md
generated
vendored
Normal file
@ -0,0 +1,19 @@
# continuity

[](https://godoc.org/github.com/containerd/continuity)
[](https://travis-ci.org/containerd/continuity)

A transport-agnostic, filesystem metadata manifest system

This project is a staging area for experiments in providing transport agnostic
metadata storage.

Please see https://github.com/opencontainers/specs/issues/11 for more details.

## Building Proto Package

If you change the proto file you will need to rebuild the generated Go with `go generate`.

```
go generate ./proto
```
37
vendor/github.com/containerd/continuity/sysx/generate.sh
generated
vendored
@ -1,37 +0,0 @@
#!/bin/bash

set -e

mksyscall="$(go env GOROOT)/src/syscall/mksyscall.pl"

fix() {
	sed 's,^package syscall$,package sysx,' \
		| sed 's,^import "unsafe"$,import (\n\t"syscall"\n\t"unsafe"\n),' \
		| gofmt -r='BytePtrFromString -> syscall.BytePtrFromString' \
		| gofmt -r='Syscall6 -> syscall.Syscall6' \
		| gofmt -r='Syscall -> syscall.Syscall' \
		| gofmt -r='SYS_GETXATTR -> syscall.SYS_GETXATTR' \
		| gofmt -r='SYS_LISTXATTR -> syscall.SYS_LISTXATTR' \
		| gofmt -r='SYS_SETXATTR -> syscall.SYS_SETXATTR' \
		| gofmt -r='SYS_REMOVEXATTR -> syscall.SYS_REMOVEXATTR' \
		| gofmt -r='SYS_LGETXATTR -> syscall.SYS_LGETXATTR' \
		| gofmt -r='SYS_LLISTXATTR -> syscall.SYS_LLISTXATTR' \
		| gofmt -r='SYS_LSETXATTR -> syscall.SYS_LSETXATTR' \
		| gofmt -r='SYS_LREMOVEXATTR -> syscall.SYS_LREMOVEXATTR'
}

if [ "$GOARCH" == "" ] || [ "$GOOS" == "" ]; then
	echo "Must specify \$GOARCH and \$GOOS"
	exit 1
fi

mkargs=""

if [ "$GOARCH" == "386" ] || [ "$GOARCH" == "arm" ]; then
	mkargs="-l32"
fi

for f in "$@"; do
	$mksyscall $mkargs "${f}_${GOOS}.go" | fix > "${f}_${GOOS}_${GOARCH}.go"
done
3
vendor/github.com/containerd/fifo/.travis.yml
generated
vendored
@ -1,3 +0,0 @@
language: go
go:
  - 1.7.x
13
vendor/github.com/containerd/fifo/Makefile
generated
vendored
@ -1,13 +0,0 @@
.PHONY: fmt vet test deps

test: deps
	go test -v ./...

deps:
	go get -d -t ./...

fmt:
	gofmt -s -l .

vet:
	go vet ./...
176
vendor/github.com/containernetworking/cni/README.md
generated
vendored
Normal file
@ -0,0 +1,176 @@
|
||||
[](https://travis-ci.org/containernetworking/cni)
|
||||
[](https://coveralls.io/github/containernetworking/cni?branch=master)
|
||||
[](https://cryptic-tundra-43194.herokuapp.com/)
|
||||
|
||||
# CNI - the Container Network Interface
|
||||
|
||||
## What is CNI?
|
||||
|
||||
The CNI (_Container Network Interface_) project consists of a specification and libraries for writing plugins to configure network interfaces in Linux containers, along with a number of supported plugins.
|
||||
CNI concerns itself only with network connectivity of containers and removing allocated resources when the container is deleted.
|
||||
Because of this focus, CNI has a wide range of support and the specification is simple to implement.
|
||||
|
||||
As well as the [specification](SPEC.md), this repository contains the Go source code of a library for integrating CNI into applications, an example command-line tool, a template for making new plugins, and the supported plugins.
|
||||
|
||||
The template code makes it straight-forward to create a CNI plugin for an existing container networking project.
|
||||
CNI also makes a good framework for creating a new container networking project from scratch.
|
||||
|
||||
## Why develop CNI?
|
||||
|
||||
Application containers on Linux are a rapidly evolving area, and within this area networking is not well addressed as it is highly environment-specific.
|
||||
We believe that many container runtimes and orchestrators will seek to solve the same problem of making the network layer pluggable.
|
||||
|
||||
To avoid duplication, we think it is prudent to define a common interface between the network plugins and container execution: hence we put forward this specification, along with libraries for Go and a set of plugins.
|
||||
|
||||
## Who is using CNI?
|
||||
### Container runtimes
|
||||
- [rkt - container engine](https://coreos.com/blog/rkt-cni-networking.html)
|
||||
- [Kurma - container runtime](http://kurma.io/)
|
||||
- [Kubernetes - a system to simplify container operations](http://kubernetes.io/docs/admin/network-plugins/)
|
||||
- [Cloud Foundry - a platform for cloud applications](https://github.com/cloudfoundry-incubator/netman-release)
|
||||
- [Mesos - a distributed systems kernel](https://github.com/apache/mesos/blob/master/docs/cni.md)
|
||||
|
||||
### 3rd party plugins
|
||||
- [Project Calico - a layer 3 virtual network](https://github.com/projectcalico/calico-cni)
|
||||
- [Weave - a multi-host Docker network](https://github.com/weaveworks/weave)
|
||||
- [Contiv Networking - policy networking for various use cases](https://github.com/contiv/netplugin)
|
||||
- [SR-IOV](https://github.com/hustcat/sriov-cni)
|
||||
- [Cilium - BPF & XDP for containers](https://github.com/cilium/cilium)
|
||||
- [Infoblox - enterprise IP address management for containers](https://github.com/infobloxopen/cni-infoblox)
|
||||
|
||||
The CNI team also maintains some [core plugins](plugins).
|
||||
|
||||
|
||||
## Contributing to CNI
|
||||
|
||||
We welcome contributions, including [bug reports](https://github.com/containernetworking/cni/issues), and code and documentation improvements.
|
||||
If you intend to contribute to code or documentation, please read [CONTRIBUTING.md](CONTRIBUTING.md). Also see the [contact section](#contact) in this README.
|
||||
|
||||
## How do I use CNI?
|
||||
|
||||
### Requirements
|
||||
|
||||
CNI requires Go 1.5+ to build.
|
||||
|
||||
Go 1.5 users will need to set GO15VENDOREXPERIMENT=1 to get vendored
|
||||
dependencies. This flag is set by default in 1.6.
|
||||
|
||||
### Included Plugins
|
||||
|
||||
This repository includes a number of common plugins in the `plugins/` directory.
|
||||
Please see the [Documentation/](Documentation/) directory for documentation about particular plugins.
|
||||
|
||||
### Running the plugins
|
||||
|
||||
The scripts/ directory contains two scripts, `priv-net-run.sh` and `docker-run.sh`, that can be used to exercise the plugins.
|
||||
|
||||
**note - priv-net-run.sh depends on `jq`**
|
||||
|
||||
Start out by creating a netconf file to describe a network:
|
||||
|
||||
```bash
|
||||
$ mkdir -p /etc/cni/net.d
|
||||
$ cat >/etc/cni/net.d/10-mynet.conf <<EOF
|
||||
{
|
||||
"cniVersion": "0.2.0",
|
||||
"name": "mynet",
|
||||
"type": "bridge",
|
||||
"bridge": "cni0",
|
||||
"isGateway": true,
|
||||
"ipMasq": true,
|
||||
"ipam": {
|
||||
"type": "host-local",
|
||||
"subnet": "10.22.0.0/16",
|
||||
"routes": [
|
||||
{ "dst": "0.0.0.0/0" }
|
||||
]
|
||||
}
|
||||
}
|
||||
EOF
|
||||
$ cat >/etc/cni/net.d/99-loopback.conf <<EOF
|
||||
{
|
||||
"cniVersion": "0.2.0",
|
||||
"type": "loopback"
|
||||
}
|
||||
EOF
|
||||
```
|
||||
|
||||
The directory `/etc/cni/net.d` is the default location in which the scripts will look for net configurations.
|
||||
|
||||
Next, build the plugins:
|
||||
|
||||
```bash
|
||||
$ ./build
|
||||
```
|
||||
|
||||
Finally, execute a command (`ifconfig` in this example) in a private network namespace that has joined the `mynet` network:
|
||||
|
||||
```bash
|
||||
$ CNI_PATH=`pwd`/bin
|
||||
$ cd scripts
|
||||
$ sudo CNI_PATH=$CNI_PATH ./priv-net-run.sh ifconfig
|
||||
eth0 Link encap:Ethernet HWaddr f2:c2:6f:54:b8:2b
|
||||
inet addr:10.22.0.2 Bcast:0.0.0.0 Mask:255.255.0.0
|
||||
inet6 addr: fe80::f0c2:6fff:fe54:b82b/64 Scope:Link
|
||||
UP BROADCAST MULTICAST MTU:1500 Metric:1
|
||||
RX packets:1 errors:0 dropped:0 overruns:0 frame:0
|
||||
TX packets:0 errors:0 dropped:1 overruns:0 carrier:0
|
||||
collisions:0 txqueuelen:0
|
||||
RX bytes:90 (90.0 B) TX bytes:0 (0.0 B)
|
||||
|
||||
lo Link encap:Local Loopback
|
||||
inet addr:127.0.0.1 Mask:255.0.0.0
|
||||
inet6 addr: ::1/128 Scope:Host
|
||||
UP LOOPBACK RUNNING MTU:65536 Metric:1
|
||||
RX packets:0 errors:0 dropped:0 overruns:0 frame:0
|
||||
TX packets:0 errors:0 dropped:0 overruns:0 carrier:0
|
||||
collisions:0 txqueuelen:0
|
||||
RX bytes:0 (0.0 B) TX bytes:0 (0.0 B)
|
||||
```
|
||||
|
||||
The environment variable `CNI_PATH` tells the scripts and library where to look for plugin executables.
|
||||
|
||||
## Running a Docker container with network namespace set up by CNI plugins
|
||||
|
||||
Use the instructions in the previous section to define a netconf and build the plugins.
|
||||
Next, the docker-run.sh script wraps `docker run` to execute the plugins prior to entering the container:
|
||||
|
||||
```bash
|
||||
$ CNI_PATH=`pwd`/bin
|
||||
$ cd scripts
|
||||
$ sudo CNI_PATH=$CNI_PATH ./docker-run.sh --rm busybox:latest ifconfig
|
||||
eth0 Link encap:Ethernet HWaddr fa:60:70:aa:07:d1
|
||||
inet addr:10.22.0.2 Bcast:0.0.0.0 Mask:255.255.0.0
|
||||
inet6 addr: fe80::f860:70ff:feaa:7d1/64 Scope:Link
|
||||
UP BROADCAST MULTICAST MTU:1500 Metric:1
|
||||
RX packets:1 errors:0 dropped:0 overruns:0 frame:0
|
||||
TX packets:0 errors:0 dropped:1 overruns:0 carrier:0
|
||||
collisions:0 txqueuelen:0
|
||||
RX bytes:90 (90.0 B) TX bytes:0 (0.0 B)
|
||||
|
||||
lo Link encap:Local Loopback
|
||||
inet addr:127.0.0.1 Mask:255.0.0.0
|
||||
inet6 addr: ::1/128 Scope:Host
|
||||
UP LOOPBACK RUNNING MTU:65536 Metric:1
|
||||
RX packets:0 errors:0 dropped:0 overruns:0 frame:0
|
||||
TX packets:0 errors:0 dropped:0 overruns:0 carrier:0
|
||||
collisions:0 txqueuelen:0
|
||||
RX bytes:0 (0.0 B) TX bytes:0 (0.0 B)
|
||||
```
|
||||
|
||||
## What might CNI do in the future?
|
||||
|
||||
CNI currently covers a wide range of needs for network configuration due to its simple model and API.
|
||||
However, in the future CNI might want to branch out into other directions:
|
||||
|
||||
- Dynamic updates to existing network configuration
|
||||
- Dynamic policies for network bandwidth and firewall rules
|
||||
|
||||
If these topics are of interest, please contact the team via the mailing list or IRC and find some like-minded people in the community to put a proposal together.
|
||||
|
||||
## Contact
|
||||
|
||||
For any questions about CNI, please reach out on the mailing list:
|
||||
- Email: [cni-dev](https://groups.google.com/forum/#!forum/cni-dev)
|
||||
- IRC: #[containernetworking](irc://irc.freenode.org:6667/#containernetworking) channel on freenode.org
|
||||
- Slack: [containernetworking.slack.com](https://cryptic-tundra-43194.herokuapp.com)
|
205
vendor/github.com/davecgh/go-spew/README.md
generated
vendored
Normal file
@ -0,0 +1,205 @@
|
||||
go-spew
|
||||
=======
|
||||
|
||||
[]
|
||||
(https://travis-ci.org/davecgh/go-spew) [![ISC License]
|
||||
(http://img.shields.io/badge/license-ISC-blue.svg)](http://copyfree.org) [![Coverage Status]
|
||||
(https://img.shields.io/coveralls/davecgh/go-spew.svg)]
|
||||
(https://coveralls.io/r/davecgh/go-spew?branch=master)
|
||||
|
||||
|
||||
Go-spew implements a deep pretty printer for Go data structures to aid in
|
||||
debugging. A comprehensive suite of tests with 100% test coverage is provided
|
||||
to ensure proper functionality. See `test_coverage.txt` for the gocov coverage
|
||||
report. Go-spew is licensed under the liberal ISC license, so it may be used in
|
||||
open source or commercial projects.
|
||||
|
||||
If you're interested in reading about how this package came to life and some
|
||||
of the challenges involved in providing a deep pretty printer, there is a blog
|
||||
post about it
|
||||
[here](https://web.archive.org/web/20160304013555/https://blog.cyphertite.com/go-spew-a-journey-into-dumping-go-data-structures/).
|
||||
|
||||
## Documentation
|
||||
|
||||
[]
|
||||
(http://godoc.org/github.com/davecgh/go-spew/spew)
|
||||
|
||||
Full `go doc` style documentation for the project can be viewed online without
|
||||
installing this package by using the excellent GoDoc site here:
|
||||
http://godoc.org/github.com/davecgh/go-spew/spew
|
||||
|
||||
You can also view the documentation locally once the package is installed with
|
||||
the `godoc` tool by running `godoc -http=":6060"` and pointing your browser to
|
||||
http://localhost:6060/pkg/github.com/davecgh/go-spew/spew
|
||||
|
||||
## Installation
|
||||
|
||||
```bash
|
||||
$ go get -u github.com/davecgh/go-spew/spew
|
||||
```
|
||||
|
||||
## Quick Start
|
||||
|
||||
Add this import line to the file you're working in:
|
||||
|
||||
```Go
|
||||
import "github.com/davecgh/go-spew/spew"
|
||||
```
|
||||
|
||||
To dump a variable with full newlines, indentation, type, and pointer
|
||||
information use Dump, Fdump, or Sdump:
|
||||
|
||||
```Go
|
||||
spew.Dump(myVar1, myVar2, ...)
|
||||
spew.Fdump(someWriter, myVar1, myVar2, ...)
|
||||
str := spew.Sdump(myVar1, myVar2, ...)
|
||||
```
|
||||
|
||||
Alternatively, if you would prefer to use format strings with a compacted inline
|
||||
printing style, use the convenience wrappers Printf, Fprintf, etc with %v (most
|
||||
compact), %+v (adds pointer addresses), %#v (adds types), or %#+v (adds types
|
||||
and pointer addresses):
|
||||
|
||||
```Go
|
||||
spew.Printf("myVar1: %v -- myVar2: %+v", myVar1, myVar2)
|
||||
spew.Printf("myVar3: %#v -- myVar4: %#+v", myVar3, myVar4)
|
||||
spew.Fprintf(someWriter, "myVar1: %v -- myVar2: %+v", myVar1, myVar2)
|
||||
spew.Fprintf(someWriter, "myVar3: %#v -- myVar4: %#+v", myVar3, myVar4)
|
||||
```
|
||||
|
||||
## Debugging a Web Application Example
|
||||
|
||||
Here is an example of how you can use `spew.Sdump()` to help debug a web application. Please be sure to wrap your output using the `html.EscapeString()` function for safety reasons. You should also only use this debugging technique in a development environment, never in production.
|
||||
|
||||
```Go
|
||||
package main
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"html"
|
||||
"net/http"
|
||||
|
||||
"github.com/davecgh/go-spew/spew"
|
||||
)
|
||||
|
||||
func handler(w http.ResponseWriter, r *http.Request) {
|
||||
w.Header().Set("Content-Type", "text/html")
|
||||
fmt.Fprintf(w, "Hi there, %s!", r.URL.Path[1:])
|
||||
fmt.Fprintf(w, "<!--\n" + html.EscapeString(spew.Sdump(w)) + "\n-->")
|
||||
}
|
||||
|
||||
func main() {
|
||||
http.HandleFunc("/", handler)
|
||||
http.ListenAndServe(":8080", nil)
|
||||
}
|
||||
```
|
||||
|
||||
## Sample Dump Output
|
||||
|
||||
```
|
||||
(main.Foo) {
|
||||
unexportedField: (*main.Bar)(0xf84002e210)({
|
||||
flag: (main.Flag) flagTwo,
|
||||
data: (uintptr) <nil>
|
||||
}),
|
||||
ExportedField: (map[interface {}]interface {}) {
|
||||
(string) "one": (bool) true
|
||||
}
|
||||
}
|
||||
([]uint8) {
|
||||
00000000 11 12 13 14 15 16 17 18 19 1a 1b 1c 1d 1e 1f 20 |............... |
|
||||
00000010 21 22 23 24 25 26 27 28 29 2a 2b 2c 2d 2e 2f 30 |!"#$%&'()*+,-./0|
|
||||
00000020 31 32 |12|
|
||||
}
|
||||
```
|
||||
|
||||
## Sample Formatter Output
|
||||
|
||||
Double pointer to a uint8:
|
||||
```
|
||||
%v: <**>5
|
||||
%+v: <**>(0xf8400420d0->0xf8400420c8)5
|
||||
%#v: (**uint8)5
|
||||
%#+v: (**uint8)(0xf8400420d0->0xf8400420c8)5
|
||||
```
|
||||
|
||||
Pointer to circular struct with a uint8 field and a pointer to itself:
|
||||
```
|
||||
%v: <*>{1 <*><shown>}
|
||||
%+v: <*>(0xf84003e260){ui8:1 c:<*>(0xf84003e260)<shown>}
|
||||
%#v: (*main.circular){ui8:(uint8)1 c:(*main.circular)<shown>}
|
||||
%#+v: (*main.circular)(0xf84003e260){ui8:(uint8)1 c:(*main.circular)(0xf84003e260)<shown>}
|
||||
```
|
||||
|
||||
## Configuration Options
|
||||
|
||||
Configuration of spew is handled by fields in the ConfigState type. For
|
||||
convenience, all of the top-level functions use a global state available via the
|
||||
spew.Config global.
|
||||
|
||||
It is also possible to create a ConfigState instance that provides methods
|
||||
equivalent to the top-level functions. This allows concurrent configuration
|
||||
options. See the ConfigState documentation for more details.
|
||||
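As a minimal illustrative sketch (the variables here are hypothetical; `ConfigState`, `Indent`, and `SortKeys` are among the options summarized in the list below), a locally scoped configuration can be used in place of the global one:

```Go
// Sketch only: a local ConfigState instead of the spew.Config global.
// SortKeys gives deterministic, diffable map output; Indent controls Dump nesting.
scs := spew.ConfigState{Indent: "\t", SortKeys: true}
scs.Dump(myVar1)
str := scs.Sdump(myVar2)
```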
|
||||
```
|
||||
* Indent
|
||||
String to use for each indentation level for Dump functions.
|
||||
It is a single space by default. A popular alternative is "\t".
|
||||
|
||||
* MaxDepth
|
||||
Maximum number of levels to descend into nested data structures.
|
||||
There is no limit by default.
|
||||
|
||||
* DisableMethods
|
||||
Disables invocation of error and Stringer interface methods.
|
||||
Method invocation is enabled by default.
|
||||
|
||||
* DisablePointerMethods
|
||||
Disables invocation of error and Stringer interface methods on types
|
||||
which only accept pointer receivers from non-pointer variables. This option
|
||||
relies on access to the unsafe package, so it will not have any effect when
|
||||
running in environments without access to the unsafe package such as Google
|
||||
App Engine or with the "safe" build tag specified.
|
||||
Pointer method invocation is enabled by default.
|
||||
|
||||
* DisablePointerAddresses
|
||||
DisablePointerAddresses specifies whether to disable the printing of
|
||||
pointer addresses. This is useful when diffing data structures in tests.
|
||||
|
||||
* DisableCapacities
|
||||
DisableCapacities specifies whether to disable the printing of capacities
|
||||
for arrays, slices, maps and channels. This is useful when diffing data
|
||||
structures in tests.
|
||||
|
||||
* ContinueOnMethod
|
||||
Enables recursion into types after invoking error and Stringer interface
|
||||
methods. Recursion after method invocation is disabled by default.
|
||||
|
||||
* SortKeys
|
||||
Specifies map keys should be sorted before being printed. Use
|
||||
this to have a more deterministic, diffable output. Note that
|
||||
only native types (bool, int, uint, floats, uintptr and string)
|
||||
and types which implement error or Stringer interfaces are supported,
|
||||
with other types sorted according to the reflect.Value.String() output
|
||||
which guarantees display stability. Natural map order is used by
|
||||
default.
|
||||
|
||||
* SpewKeys
|
||||
SpewKeys specifies that, as a last resort attempt, map keys should be
|
||||
spewed to strings and sorted by those strings. This is only considered
|
||||
if SortKeys is true.
|
||||
|
||||
```
|
||||
|
||||
## Unsafe Package Dependency
|
||||
|
||||
This package relies on the unsafe package to perform some of the more advanced
|
||||
features, however it also supports a "limited" mode which allows it to work in
|
||||
environments where the unsafe package is not available. By default, it will
|
||||
operate in this mode on Google App Engine and when compiled with GopherJS. The
|
||||
"safe" build tag may also be specified to force the package to build without
|
||||
using the unsafe package.
|
||||
|
||||
## License
|
||||
|
||||
Go-spew is licensed under the [copyfree](http://copyfree.org) ISC License.
|
182
vendor/github.com/docker/distribution/AUTHORS
generated
vendored
@ -1,182 +0,0 @@
|
||||
a-palchikov <deemok@gmail.com>
|
||||
Aaron Lehmann <aaron.lehmann@docker.com>
|
||||
Aaron Schlesinger <aschlesinger@deis.com>
|
||||
Aaron Vinson <avinson.public@gmail.com>
|
||||
Adam Duke <adam.v.duke@gmail.com>
|
||||
Adam Enger <adamenger@gmail.com>
|
||||
Adrian Mouat <adrian.mouat@gmail.com>
|
||||
Ahmet Alp Balkan <ahmetalpbalkan@gmail.com>
|
||||
Alex Chan <alex.chan@metaswitch.com>
|
||||
Alex Elman <aelman@indeed.com>
|
||||
Alexey Gladkov <gladkov.alexey@gmail.com>
|
||||
allencloud <allen.sun@daocloud.io>
|
||||
amitshukla <ashukla73@hotmail.com>
|
||||
Amy Lindburg <amy.lindburg@docker.com>
|
||||
Andrew Hsu <andrewhsu@acm.org>
|
||||
Andrew Meredith <andymeredith@gmail.com>
|
||||
Andrew T Nguyen <andrew.nguyen@docker.com>
|
||||
Andrey Kostov <kostov.andrey@gmail.com>
|
||||
Andy Goldstein <agoldste@redhat.com>
|
||||
Anis Elleuch <vadmeste@gmail.com>
|
||||
Anton Tiurin <noxiouz@yandex.ru>
|
||||
Antonio Mercado <amercado@thinknode.com>
|
||||
Antonio Murdaca <runcom@redhat.com>
|
||||
Anusha Ragunathan <anusha@docker.com>
|
||||
Arien Holthuizen <aholthuizen@schubergphilis.com>
|
||||
Arnaud Porterie <arnaud.porterie@docker.com>
|
||||
Arthur Baars <arthur@semmle.com>
|
||||
Asuka Suzuki <hello@tanksuzuki.com>
|
||||
Avi Miller <avi.miller@oracle.com>
|
||||
Ayose Cazorla <ayosec@gmail.com>
|
||||
BadZen <dave.trombley@gmail.com>
|
||||
Ben Bodenmiller <bbodenmiller@hotmail.com>
|
||||
Ben Firshman <ben@firshman.co.uk>
|
||||
bin liu <liubin0329@gmail.com>
|
||||
Brian Bland <brian.bland@docker.com>
|
||||
burnettk <burnettk@gmail.com>
|
||||
Carson A <ca@carsonoid.net>
|
||||
Cezar Sa Espinola <cezarsa@gmail.com>
|
||||
Charles Smith <charles.smith@docker.com>
|
||||
Chris Dillon <squarism@gmail.com>
|
||||
cuiwei13 <cuiwei13@pku.edu.cn>
|
||||
cyli <cyli@twistedmatrix.com>
|
||||
Daisuke Fujita <dtanshi45@gmail.com>
|
||||
Daniel Huhn <daniel@danielhuhn.de>
|
||||
Darren Shepherd <darren@rancher.com>
|
||||
Dave Trombley <dave.trombley@gmail.com>
|
||||
Dave Tucker <dt@docker.com>
|
||||
David Lawrence <david.lawrence@docker.com>
|
||||
David Verhasselt <david@crowdway.com>
|
||||
David Xia <dxia@spotify.com>
|
||||
davidli <wenquan.li@hp.com>
|
||||
Dejan Golja <dejan@golja.org>
|
||||
Derek McGowan <derek@mcgstyle.net>
|
||||
Diogo Mónica <diogo.monica@gmail.com>
|
||||
DJ Enriquez <dj.enriquez@infospace.com>
|
||||
Donald Huang <don.hcd@gmail.com>
|
||||
Doug Davis <dug@us.ibm.com>
|
||||
Edgar Lee <edgar.lee@docker.com>
|
||||
Eric Yang <windfarer@gmail.com>
|
||||
Fabio Berchtold <jamesclonk@jamesclonk.ch>
|
||||
Fabio Huser <fabio@fh1.ch>
|
||||
farmerworking <farmerworking@gmail.com>
|
||||
Felix Yan <felixonmars@archlinux.org>
|
||||
Florentin Raud <florentin.raud@gmail.com>
|
||||
Frank Chen <frankchn@gmail.com>
|
||||
Frederick F. Kautz IV <fkautz@alumni.cmu.edu>
|
||||
gabriell nascimento <gabriell@bluesoft.com.br>
|
||||
Gleb Schukin <gschukin@ptsecurity.com>
|
||||
harche <p.harshal@gmail.com>
|
||||
Henri Gomez <henri.gomez@gmail.com>
|
||||
Hu Keping <hukeping@huawei.com>
|
||||
Hua Wang <wanghua.humble@gmail.com>
|
||||
HuKeping <hukeping@huawei.com>
|
||||
Ian Babrou <ibobrik@gmail.com>
|
||||
igayoso <igayoso@gmail.com>
|
||||
Jack Griffin <jackpg14@gmail.com>
|
||||
James Findley <jfindley@fastmail.com>
|
||||
Jason Freidman <jason.freidman@gmail.com>
|
||||
Jason Heiss <jheiss@aput.net>
|
||||
Jeff Nickoloff <jeff@allingeek.com>
|
||||
Jess Frazelle <acidburn@google.com>
|
||||
Jessie Frazelle <jessie@docker.com>
|
||||
jhaohai <jhaohai@foxmail.com>
|
||||
Jianqing Wang <tsing@jianqing.org>
|
||||
Jihoon Chung <jihoon@gmail.com>
|
||||
Joao Fernandes <joao.fernandes@docker.com>
|
||||
John Mulhausen <john@docker.com>
|
||||
John Starks <jostarks@microsoft.com>
|
||||
Jon Johnson <jonjohnson@google.com>
|
||||
Jon Poler <jonathan.poler@apcera.com>
|
||||
Jonathan Boulle <jonathanboulle@gmail.com>
|
||||
Jordan Liggitt <jliggitt@redhat.com>
|
||||
Josh Chorlton <josh.chorlton@docker.com>
|
||||
Josh Hawn <josh.hawn@docker.com>
|
||||
Julien Fernandez <julien.fernandez@gmail.com>
|
||||
Ke Xu <leonhartx.k@gmail.com>
|
||||
Keerthan Mala <kmala@engineyard.com>
|
||||
Kelsey Hightower <kelsey.hightower@gmail.com>
|
||||
Kenneth Lim <kennethlimcp@gmail.com>
|
||||
Kenny Leung <kleung@google.com>
|
||||
Li Yi <denverdino@gmail.com>
|
||||
Liu Hua <sdu.liu@huawei.com>
|
||||
liuchang0812 <liuchang0812@gmail.com>
|
||||
Lloyd Ramey <lnr0626@gmail.com>
|
||||
Louis Kottmann <louis.kottmann@gmail.com>
|
||||
Luke Carpenter <x@rubynerd.net>
|
||||
Marcus Martins <marcus@docker.com>
|
||||
Mary Anthony <mary@docker.com>
|
||||
Matt Bentley <mbentley@mbentley.net>
|
||||
Matt Duch <matt@learnmetrics.com>
|
||||
Matt Moore <mattmoor@google.com>
|
||||
Matt Robenolt <matt@ydekproductions.com>
|
||||
Matthew Green <greenmr@live.co.uk>
|
||||
Michael Prokop <mika@grml.org>
|
||||
Michal Minar <miminar@redhat.com>
|
||||
Michal Minář <miminar@redhat.com>
|
||||
Mike Brown <brownwm@us.ibm.com>
|
||||
Miquel Sabaté <msabate@suse.com>
|
||||
Misty Stanley-Jones <misty@apache.org>
|
||||
Misty Stanley-Jones <misty@docker.com>
|
||||
Morgan Bauer <mbauer@us.ibm.com>
|
||||
moxiegirl <mary@docker.com>
|
||||
Nathan Sullivan <nathan@nightsys.net>
|
||||
nevermosby <robolwq@qq.com>
|
||||
Nghia Tran <tcnghia@gmail.com>
|
||||
Nikita Tarasov <nikita@mygento.ru>
|
||||
Noah Treuhaft <noah.treuhaft@docker.com>
|
||||
Nuutti Kotivuori <nuutti.kotivuori@poplatek.fi>
|
||||
Oilbeater <liumengxinfly@gmail.com>
|
||||
Olivier Gambier <olivier@docker.com>
|
||||
Olivier Jacques <olivier.jacques@hp.com>
|
||||
Omer Cohen <git@omer.io>
|
||||
Patrick Devine <patrick.devine@docker.com>
|
||||
Phil Estes <estesp@linux.vnet.ibm.com>
|
||||
Philip Misiowiec <philip@atlashealth.com>
|
||||
Pierre-Yves Ritschard <pyr@spootnik.org>
|
||||
Qiao Anran <qiaoanran@gmail.com>
|
||||
Randy Barlow <randy@electronsweatshop.com>
|
||||
Richard Scothern <richard.scothern@docker.com>
|
||||
Rodolfo Carvalho <rhcarvalho@gmail.com>
|
||||
Rusty Conover <rusty@luckydinosaur.com>
|
||||
Sean Boran <Boran@users.noreply.github.com>
|
||||
Sebastiaan van Stijn <github@gone.nl>
|
||||
Sebastien Coavoux <s.coavoux@free.fr>
|
||||
Serge Dubrouski <sergeyfd@gmail.com>
|
||||
Sharif Nassar <sharif@mrwacky.com>
|
||||
Shawn Falkner-Horine <dreadpirateshawn@gmail.com>
|
||||
Shreyas Karnik <karnik.shreyas@gmail.com>
|
||||
Simon Thulbourn <simon+github@thulbourn.com>
|
||||
spacexnice <yaoyao.xyy@alibaba-inc.com>
|
||||
Spencer Rinehart <anubis@overthemonkey.com>
|
||||
Stan Hu <stanhu@gmail.com>
|
||||
Stefan Majewsky <stefan.majewsky@sap.com>
|
||||
Stefan Weil <sw@weilnetz.de>
|
||||
Stephen J Day <stephen.day@docker.com>
|
||||
Sungho Moon <sungho.moon@navercorp.com>
|
||||
Sven Dowideit <SvenDowideit@home.org.au>
|
||||
Sylvain Baubeau <sbaubeau@redhat.com>
|
||||
Ted Reed <ted.reed@gmail.com>
|
||||
tgic <farmer1992@gmail.com>
|
||||
Thomas Sjögren <konstruktoid@users.noreply.github.com>
|
||||
Tianon Gravi <admwiggin@gmail.com>
|
||||
Tibor Vass <teabee89@gmail.com>
|
||||
Tonis Tiigi <tonistiigi@gmail.com>
|
||||
Tony Holdstock-Brown <tony@docker.com>
|
||||
Trevor Pounds <trevor.pounds@gmail.com>
|
||||
Troels Thomsen <troels@thomsen.io>
|
||||
Victor Vieux <vieux@docker.com>
|
||||
Victoria Bialas <victoria.bialas@docker.com>
|
||||
Vincent Batts <vbatts@redhat.com>
|
||||
Vincent Demeester <vincent@sbr.pm>
|
||||
Vincent Giersch <vincent.giersch@ovh.net>
|
||||
W. Trevor King <wking@tremily.us>
|
||||
weiyuan.yl <weiyuan.yl@alibaba-inc.com>
|
||||
xg.song <xg.song@venusource.com>
|
||||
xiekeyang <xiekeyang@huawei.com>
|
||||
Yann ROBERT <yann.robert@anantaplex.fr>
|
||||
yaoyao.xyy <yaoyao.xyy@alibaba-inc.com>
|
||||
yuexiao-wang <wang.yuexiao@zte.com.cn>
|
||||
yuzou <zouyu7@huawei.com>
|
||||
zhouhaibing089 <zhouhaibing089@gmail.com>
|
||||
姜继忠 <jizhong.jiangjz@alibaba-inc.com>
|
131
vendor/github.com/docker/distribution/README.md
generated
vendored
Normal file
@ -0,0 +1,131 @@
|
||||
# Distribution
|
||||
|
||||
The Docker toolset to pack, ship, store, and deliver content.
|
||||
|
||||
This repository's main product is the Docker Registry 2.0 implementation
|
||||
for storing and distributing Docker images. It supersedes the
|
||||
[docker/docker-registry](https://github.com/docker/docker-registry)
|
||||
project with a new API design, focused around security and performance.
|
||||
|
||||
<img src="https://www.docker.com/sites/default/files/oyster-registry-3.png" width=200px/>
|
||||
|
||||
[](https://circleci.com/gh/docker/distribution/tree/master)
|
||||
[](https://godoc.org/github.com/docker/distribution)
|
||||
|
||||
This repository contains the following components:
|
||||
|
||||
|**Component** |Description |
|
||||
|--------------------|----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
|
||||
| **registry** | An implementation of the [Docker Registry HTTP API V2](docs/spec/api.md) for use with docker 1.6+. |
|
||||
| **libraries** | A rich set of libraries for interacting with distribution components. Please see [godoc](https://godoc.org/github.com/docker/distribution) for details. **Note**: These libraries are **unstable**. |
|
||||
| **specifications** | _Distribution_ related specifications are available in [docs/spec](docs/spec) |
|
||||
| **documentation** | Docker's full documentation set is available at [docs.docker.com](https://docs.docker.com). This repository [contains the subset](docs/) related just to the registry. |
|
||||
|
||||
### How does this integrate with Docker engine?
|
||||
|
||||
This project should provide an implementation to a V2 API for use in the [Docker
|
||||
core project](https://github.com/docker/docker). The API should be embeddable
|
||||
and simplify the process of securely pulling and pushing content from `docker`
|
||||
daemons.
|
||||
|
||||
### What are the long term goals of the Distribution project?
|
||||
|
||||
The _Distribution_ project has the further long term goal of providing a
|
||||
secure tool chain for distributing content. The specifications, APIs and tools
|
||||
should be as useful with Docker as they are without.
|
||||
|
||||
Our goal is to design a professional grade and extensible content distribution
|
||||
system that allows users to:
|
||||
|
||||
* Enjoy an efficient, secured and reliable way to store, manage, package and
|
||||
exchange content
|
||||
* Hack/roll their own on top of healthy open-source components
|
||||
* Implement their own home made solution through good specs, and solid
|
||||
extensions mechanism.
|
||||
|
||||
## More about Registry 2.0
|
||||
|
||||
The new registry implementation provides the following benefits:
|
||||
|
||||
- faster push and pull
|
||||
- new, more efficient implementation
|
||||
- simplified deployment
|
||||
- pluggable storage backend
|
||||
- webhook notifications
|
||||
|
||||
For information on upcoming functionality, please see [ROADMAP.md](ROADMAP.md).
|
||||
|
||||
### Who needs to deploy a registry?
|
||||
|
||||
By default, Docker users pull images from Docker's public registry instance.
|
||||
[Installing Docker](https://docs.docker.com/engine/installation/) gives users this
|
||||
ability. Users can also push images to a repository on Docker's public registry,
|
||||
if they have a [Docker Hub](https://hub.docker.com/) account.
|
||||
|
||||
For some users and even companies, this default behavior is sufficient. For
|
||||
others, it is not.
|
||||
|
||||
For example, users with their own software products may want to maintain a
|
||||
registry for private, company images. Also, you may wish to deploy your own
|
||||
image repository for images used to test or in continuous integration. For these
|
||||
use cases and others, [deploying your own registry instance](https://github.com/docker/docker.github.io/blob/master/registry/deploying.md)
|
||||
may be the better choice.
|
||||
|
||||
### Migration to Registry 2.0
|
||||
|
||||
For those who have previously deployed their own registry based on the Registry
|
||||
1.0 implementation and wish to deploy a Registry 2.0 while retaining images,
|
||||
data migration is required. A tool to assist with migration efforts has been
|
||||
created. For more information see [docker/migrator](https://github.com/docker/migrator).
|
||||
|
||||
## Contribute
|
||||
|
||||
Please see [CONTRIBUTING.md](CONTRIBUTING.md) for details on how to contribute
|
||||
issues, fixes, and patches to this project. If you are contributing code, see
|
||||
the instructions for [building a development environment](BUILDING.md).
|
||||
|
||||
## Support
|
||||
|
||||
If any issues are encountered while using the _Distribution_ project, several
|
||||
avenues are available for support:
|
||||
|
||||
<table>
|
||||
<tr>
|
||||
<th align="left">
|
||||
IRC
|
||||
</th>
|
||||
<td>
|
||||
#docker-distribution on FreeNode
|
||||
</td>
|
||||
</tr>
|
||||
<tr>
|
||||
<th align="left">
|
||||
Issue Tracker
|
||||
</th>
|
||||
<td>
|
||||
github.com/docker/distribution/issues
|
||||
</td>
|
||||
</tr>
|
||||
<tr>
|
||||
<th align="left">
|
||||
Google Groups
|
||||
</th>
|
||||
<td>
|
||||
https://groups.google.com/a/dockerproject.org/forum/#!forum/distribution
|
||||
</td>
|
||||
</tr>
|
||||
<tr>
|
||||
<th align="left">
|
||||
Mailing List
|
||||
</th>
|
||||
<td>
|
||||
docker@dockerproject.org
|
||||
</td>
|
||||
</tr>
|
||||
</table>
|
||||
|
||||
|
||||
## License
|
||||
|
||||
This project is distributed under [Apache License, Version 2.0](LICENSE).
|
41
vendor/github.com/docker/distribution/vendor.conf
generated
vendored
Normal file
@ -0,0 +1,41 @@
|
||||
github.com/Azure/azure-sdk-for-go c6f0533defaaaa26ea4dff3c9774e36033088112
|
||||
github.com/Sirupsen/logrus d26492970760ca5d33129d2d799e34be5c4782eb
|
||||
github.com/aws/aws-sdk-go c6fc52983ea2375810aa38ddb5370e9cdf611716
|
||||
github.com/bshuster-repo/logrus-logstash-hook 5f729f2fb50a301153cae84ff5c58981d51c095a
|
||||
github.com/bugsnag/bugsnag-go b1d153021fcd90ca3f080db36bec96dc690fb274
|
||||
github.com/bugsnag/osext 0dd3f918b21bec95ace9dc86c7e70266cfc5c702
|
||||
github.com/bugsnag/panicwrap e2c28503fcd0675329da73bf48b33404db873782
|
||||
github.com/denverdino/aliyungo afedced274aa9a7fcdd47ac97018f0f8db4e5de2
|
||||
github.com/docker/goamz f0a21f5b2e12f83a505ecf79b633bb2035cf6f85
|
||||
github.com/docker/libtrust fa567046d9b14f6aa788882a950d69651d230b21
|
||||
github.com/garyburd/redigo 535138d7bcd717d6531c701ef5933d98b1866257
|
||||
github.com/go-ini/ini 2ba15ac2dc9cdf88c110ec2dc0ced7fa45f5678c
|
||||
github.com/golang/protobuf/proto 8d92cf5fc15a4382f8964b08e1f42a75c0591aa3
|
||||
github.com/gorilla/context 14f550f51af52180c2eefed15e5fd18d63c0a64a
|
||||
github.com/gorilla/handlers 60c7bfde3e33c201519a200a4507a158cc03a17b
|
||||
github.com/gorilla/mux e444e69cbd2e2e3e0749a2f3c717cec491552bbf
|
||||
github.com/inconshreveable/mousetrap 76626ae9c91c4f2a10f34cad8ce83ea42c93bb75
|
||||
github.com/jmespath/go-jmespath bd40a432e4c76585ef6b72d3fd96fb9b6dc7b68d
|
||||
github.com/miekg/dns 271c58e0c14f552178ea321a545ff9af38930f39
|
||||
github.com/mitchellh/mapstructure 482a9fd5fa83e8c4e7817413b80f3eb8feec03ef
|
||||
github.com/ncw/swift b964f2ca856aac39885e258ad25aec08d5f64ee6
|
||||
github.com/spf13/cobra 312092086bed4968099259622145a0c9ae280064
|
||||
github.com/spf13/pflag 5644820622454e71517561946e3d94b9f9db6842
|
||||
github.com/stevvooe/resumable 51ad44105773cafcbe91927f70ac68e1bf78f8b4
|
||||
github.com/xenolf/lego/acme a9d8cec0e6563575e5868a005359ac97911b5985
|
||||
github.com/yvasiyarov/go-metrics 57bccd1ccd43f94bb17fdd8bf3007059b802f85e
|
||||
github.com/yvasiyarov/gorelic a9bba5b9ab508a086f9a12b8c51fab68478e2128
|
||||
github.com/yvasiyarov/newrelic_platform_go b21fdbd4370f3717f3bbd2bf41c223bc273068e6
|
||||
golang.org/x/crypto c10c31b5e94b6f7a0283272dc2bb27163dcea24b
|
||||
golang.org/x/net 4876518f9e71663000c348837735820161a42df7
|
||||
golang.org/x/oauth2 045497edb6234273d67dbc25da3f2ddbc4c4cacf
|
||||
golang.org/x/time/rate a4bde12657593d5e90d0533a3e4fd95e635124cb
|
||||
google.golang.org/api 9bf6e6e569ff057f75d9604a46c52928f17d2b54
|
||||
google.golang.org/appengine 12d5545dc1cfa6047a286d5e853841b6471f4c19
|
||||
google.golang.org/cloud 975617b05ea8a58727e6c1a06b6161ff4185a9f2
|
||||
google.golang.org/grpc d3ddb4469d5a1b949fc7a7da7c1d6a0d1b6de994
|
||||
gopkg.in/check.v1 64131543e7896d5bcc6bd5a76287eb75ea96c673
|
||||
gopkg.in/square/go-jose.v1 40d457b439244b546f023d056628e5184136899b
|
||||
gopkg.in/yaml.v2 bef53efd0c76e49e6de55ead051f886bea7e9420
|
||||
rsc.io/letsencrypt e770c10b0f1a64775ae91d240407ce00d1a5bdeb https://github.com/dmcgowan/letsencrypt.git
|
||||
github.com/opencontainers/go-digest a6d0ee40d4207ea02364bd3b9e8e77b9159ba1eb
|
1652
vendor/github.com/docker/docker/AUTHORS
generated
vendored
File diff suppressed because it is too large
304
vendor/github.com/docker/docker/README.md
generated
vendored
Normal file
@ -0,0 +1,304 @@
|
||||
Docker: the container engine [](https://github.com/docker/docker/releases/latest)
|
||||
============================
|
||||
|
||||
Docker is an open source project to pack, ship and run any application
|
||||
as a lightweight container.
|
||||
|
||||
Docker containers are both *hardware-agnostic* and *platform-agnostic*.
|
||||
This means they can run anywhere, from your laptop to the largest
|
||||
cloud compute instance and everything in between - and they don't require
|
||||
you to use a particular language, framework or packaging system. That
|
||||
makes them great building blocks for deploying and scaling web apps,
|
||||
databases, and backend services without depending on a particular stack
|
||||
or provider.
|
||||
|
||||
Docker began as an open-source implementation of the deployment engine which
|
||||
powered [dotCloud](http://web.archive.org/web/20130530031104/https://www.dotcloud.com/),
|
||||
a popular Platform-as-a-Service. It benefits directly from the experience
|
||||
accumulated over several years of large-scale operation and support of hundreds
|
||||
of thousands of applications and databases.
|
||||
|
||||

|
||||
|
||||
## Security Disclosure
|
||||
|
||||
Security is very important to us. If you have any issue regarding security,
|
||||
please disclose the information responsibly by sending an email to
|
||||
security@docker.com and not by creating a GitHub issue.
|
||||
|
||||
## Better than VMs
|
||||
|
||||
A common method for distributing applications and sandboxing their
|
||||
execution is to use virtual machines, or VMs. Typical VM formats are
|
||||
VMware's vmdk, Oracle VirtualBox's vdi, and Amazon EC2's ami. In theory
|
||||
these formats should allow every developer to automatically package
|
||||
their application into a "machine" for easy distribution and deployment.
|
||||
In practice, that almost never happens, for a few reasons:
|
||||
|
||||
* *Size*: VMs are very large which makes them impractical to store
|
||||
and transfer.
|
||||
* *Performance*: running VMs consumes significant CPU and memory,
|
||||
which makes them impractical in many scenarios, for example local
|
||||
development of multi-tier applications, and large-scale deployment
|
||||
of cpu and memory-intensive applications on large numbers of
|
||||
machines.
|
||||
* *Portability*: competing VM environments don't play well with each
|
||||
other. Although conversion tools do exist, they are limited and
|
||||
add even more overhead.
|
||||
* *Hardware-centric*: VMs were designed with machine operators in
|
||||
mind, not software developers. As a result, they offer very
|
||||
limited tooling for what developers need most: building, testing
|
||||
and running their software. For example, VMs offer no facilities
|
||||
for application versioning, monitoring, configuration, logging or
|
||||
service discovery.
|
||||
|
||||
By contrast, Docker relies on a different sandboxing method known as
|
||||
*containerization*. Unlike traditional virtualization, containerization
|
||||
takes place at the kernel level. Most modern operating system kernels
|
||||
now support the primitives necessary for containerization, including
|
||||
Linux with [openvz](https://openvz.org),
|
||||
[vserver](http://linux-vserver.org) and more recently
|
||||
[lxc](https://linuxcontainers.org/), Solaris with
|
||||
[zones](https://docs.oracle.com/cd/E26502_01/html/E29024/preface-1.html#scrolltoc),
|
||||
and FreeBSD with
|
||||
[Jails](https://www.freebsd.org/doc/handbook/jails.html).
|
||||
|
||||
Docker builds on top of these low-level primitives to offer developers a
|
||||
portable format and runtime environment that solves all four problems.
|
||||
Docker containers are small (and their transfer can be optimized with
|
||||
layers), they have basically zero memory and cpu overhead, they are
|
||||
completely portable, and are designed from the ground up with an
|
||||
application-centric design.
|
||||
|
||||
Perhaps best of all, because Docker operates at the OS level, it can still be
|
||||
run inside a VM!
|
||||
|
||||
## Plays well with others
|
||||
|
||||
Docker does not require you to buy into a particular programming
|
||||
language, framework, packaging system, or configuration language.
|
||||
|
||||
Is your application a Unix process? Does it use files, tcp connections,
|
||||
environment variables, standard Unix streams and command-line arguments
|
||||
as inputs and outputs? Then Docker can run it.
|
||||
|
||||
Can your application's build be expressed as a sequence of such
|
||||
commands? Then Docker can build it.
|
||||
|
||||
## Escape dependency hell
|
||||
|
||||
A common problem for developers is the difficulty of managing all
|
||||
their application's dependencies in a simple and automated way.
|
||||
|
||||
This is usually difficult for several reasons:
|
||||
|
||||
* *Cross-platform dependencies*. Modern applications often depend on
|
||||
a combination of system libraries and binaries, language-specific
|
||||
packages, framework-specific modules, internal components
|
||||
developed for another project, etc. These dependencies live in
|
||||
different "worlds" and require different tools - these tools
|
||||
typically don't work well with each other, requiring awkward
|
||||
custom integrations.
|
||||
|
||||
* *Conflicting dependencies*. Different applications may depend on
|
||||
different versions of the same dependency. Packaging tools handle
|
||||
these situations with various degrees of ease - but they all
|
||||
handle them in different and incompatible ways, which again forces
|
||||
the developer to do extra work.
|
||||
|
||||
* *Custom dependencies*. A developer may need to prepare a custom
|
||||
version of their application's dependency. Some packaging systems
|
||||
can handle custom versions of a dependency, others can't - and all
|
||||
of them handle it differently.
|
||||
|
||||
|
||||

Docker solves the problem of dependency hell by giving the developer a simple way to express *all* their application's dependencies in one place, while streamlining the process of assembling them. If this makes you think of [XKCD 927](https://xkcd.com/927/), don't worry. Docker doesn't *replace* your favorite packaging systems. It simply orchestrates their use in a simple and repeatable way. How does it do that? With layers.

Docker defines a build as running a sequence of Unix commands, one after the other, in the same container. Build commands modify the contents of the container (usually by installing new files on the filesystem), the next command modifies it some more, etc. Since each build command inherits the result of the previous commands, the *order* in which the commands are executed expresses *dependencies*.

Here's a typical Docker build process:

```bash
FROM ubuntu:12.04
RUN apt-get update && apt-get install -y python python-pip curl
RUN curl -sSL https://github.com/shykes/helloflask/archive/master.tar.gz | tar -xzv
RUN cd helloflask-master && pip install -r requirements.txt
```

Note that Docker doesn't care *how* dependencies are built - as long as they can be built by running a Unix command in a container.
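
Turning a build definition like the one above into a runnable container is then a couple of CLI commands. A minimal, hedged sketch follows; the image tag `helloflask` is an illustrative assumption, not part of the example above:

```bash
# Build an image from the Dockerfile in the current directory.
# The tag "helloflask" is an illustrative name.
docker build -t helloflask .

# Start an interactive shell inside the built image to poke around.
docker run --rm -it helloflask bash
```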

Getting started
===============

Docker can be installed either on your computer for building applications or on servers for running them. To get started, [check out the installation instructions in the documentation](https://docs.docker.com/engine/installation/).

Usage examples
==============

Docker can be used to run short-lived commands, long-running daemons (app servers, databases, etc.), interactive shell sessions, etc.

You can find a [list of real-world examples](https://docs.docker.com/engine/examples/) in the documentation.

Under the hood
--------------

Under the hood, Docker is built on the following components:

* The [cgroups](https://www.kernel.org/doc/Documentation/cgroup-v1/cgroups.txt) and [namespaces](http://man7.org/linux/man-pages/man7/namespaces.7.html) capabilities of the Linux kernel
* The [Go](https://golang.org) programming language
* The [Docker Image Specification](https://github.com/docker/docker/blob/master/image/spec/v1.md)
* The [Libcontainer Specification](https://github.com/opencontainers/runc/blob/master/libcontainer/SPEC.md)

Contributing to Docker [GoDoc](https://godoc.org/github.com/docker/docker)
======================

| **Master** (Linux) | **Experimental** (Linux) | **Windows** | **FreeBSD** |
|------------------|----------------------|---------|---------|
| [Build Status](https://jenkins.dockerproject.org/view/Docker/job/Docker%20Master/) | [Build Status](https://jenkins.dockerproject.org/view/Docker/job/Docker%20Master%20%28experimental%29/) | [Build Status](http://jenkins.dockerproject.org/job/Docker%20Master%20(windows)/) | [Build Status](http://jenkins.dockerproject.org/job/Docker%20Master%20(freebsd)/) |

Want to hack on Docker? Awesome! We have [instructions to help you get started contributing code or documentation](https://docs.docker.com/opensource/project/who-written-for/).

These instructions are probably not perfect, please let us know if anything feels wrong or incomplete. Better yet, submit a PR and improve them yourself.

Getting the development builds
==============================

Want to run Docker from a master build? You can download master builds at [master.dockerproject.org](https://master.dockerproject.org). They are updated with each commit merged into the master branch.

Don't know how to use that super cool new feature in the master build? Check out the master docs at [docs.master.dockerproject.org](http://docs.master.dockerproject.org).

How the project is run
======================

Docker is a very, very active project. If you want to learn more about how it is run, or want to get more involved, the best place to start is [the project directory](https://github.com/docker/docker/tree/master/project).

We are always open to suggestions on process improvements, and are always looking for more maintainers.

### Talking to other Docker users and contributors

<table class="tg">
  <col width="45%">
  <col width="65%">
  <tr>
    <td>Internet Relay Chat (IRC)</td>
    <td>
      <p>
        IRC is a direct line to our most knowledgeable Docker users; we have
        both the <code>#docker</code> and <code>#docker-dev</code> group on
        <strong>irc.freenode.net</strong>.
        IRC is a rich chat protocol but it can overwhelm new users. You can search
        <a href="https://botbot.me/freenode/docker/#" target="_blank">our chat archives</a>.
      </p>
      Read our <a href="https://docs.docker.com/opensource/get-help/#/irc-quickstart" target="_blank">IRC quickstart guide</a> for an easy way to get started.
    </td>
  </tr>
  <tr>
    <td>Docker Community Forums</td>
    <td>
      The <a href="https://forums.docker.com/c/open-source-projects/de" target="_blank">Docker Engine</a>
      group is for users of the Docker Engine project.
    </td>
  </tr>
  <tr>
    <td>Google Groups</td>
    <td>
      The <a href="https://groups.google.com/forum/#!forum/docker-dev" target="_blank">docker-dev</a> group is for contributors and other people contributing to the Docker project. You can join this group without a Google account by sending an email to <a href="mailto:docker-dev+subscribe@googlegroups.com">docker-dev+subscribe@googlegroups.com</a>. You'll receive a join-request message; simply reply to the message to confirm your subscription.
    </td>
  </tr>
  <tr>
    <td>Twitter</td>
    <td>
      You can follow <a href="https://twitter.com/docker/" target="_blank">Docker's Twitter feed</a>
      to get updates on our products. You can also tweet us questions or just share blogs or stories.
    </td>
  </tr>
  <tr>
    <td>Stack Overflow</td>
    <td>
      Stack Overflow has over 7000 Docker questions listed. We regularly monitor <a href="https://stackoverflow.com/search?tab=newest&q=docker" target="_blank">Docker questions</a> and so do many other knowledgeable Docker users.
    </td>
  </tr>
</table>

### Legal

*Brought to you courtesy of our legal counsel. For more context, please see the [NOTICE](https://github.com/docker/docker/blob/master/NOTICE) document in this repo.*

Use and transfer of Docker may be subject to certain restrictions by the United States and other governments.

It is your responsibility to ensure that your use and/or transfer does not violate applicable laws.

For more information, please see https://www.bis.doc.gov

Licensing
=========
Docker is licensed under the Apache License, Version 2.0. See [LICENSE](https://github.com/docker/docker/blob/master/LICENSE) for the full license text.

Other Docker Related Projects
=============================
There are a number of projects under development that are based on Docker's core technology. These projects expand the tooling built around the Docker platform to broaden its application and utility.

* [Docker Registry](https://github.com/docker/distribution): Registry server for Docker (hosting/delivery of repositories and images)
* [Docker Machine](https://github.com/docker/machine): Machine management for a container-centric world
* [Docker Swarm](https://github.com/docker/swarm): A Docker-native clustering system
* [Docker Compose](https://github.com/docker/compose) (formerly Fig): Define and run multi-container apps
* [Kitematic](https://github.com/docker/kitematic): The easiest way to use Docker on Mac and Windows

If you know of another project underway that should be listed here, please help us keep this list up-to-date by submitting a PR.

Awesome-Docker
==============
You can find more projects, tools and articles related to Docker on the [awesome-docker list](https://github.com/veggiemonk/awesome-docker). Add your project there.
11
vendor/github.com/docker/docker/pkg/README.md
generated
vendored
Normal file
11
vendor/github.com/docker/docker/pkg/README.md
generated
vendored
Normal file
@ -0,0 +1,11 @@
pkg/ is a collection of utility packages used by the Docker project without being specific to its internals.

Utility packages are kept separate from the docker core codebase to keep it as small and concise as possible. If some utilities grow larger and their APIs stabilize, they may be moved to their own repository under the Docker organization, to facilitate re-use by other projects. However that is not the priority.

The directory `pkg` is named after the same directory in the camlistore project. Since Brad is a core Go maintainer, we thought it made sense to copy his methods for organizing Go code :) Thanks Brad!

Because utility packages are small and neatly separated from the rest of the codebase, they are a good place to start for aspiring maintainers and contributors. Get in touch if you want to help maintain them!
2
vendor/github.com/docker/docker/pkg/signal/trap.go
generated
vendored
2
vendor/github.com/docker/docker/pkg/signal/trap.go
generated
vendored
@ -11,8 +11,8 @@ import (
"syscall"
"time"

"github.com/Sirupsen/logrus"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
)

// Trap sets up a simplified signal "trap", appropriate for common
1
vendor/github.com/docker/docker/project/CONTRIBUTORS.md
generated
vendored
1
vendor/github.com/docker/docker/project/CONTRIBUTORS.md
generated
vendored
@ -1 +0,0 @@
../CONTRIBUTING.md
140
vendor/github.com/docker/docker/vendor.conf
generated
vendored
Normal file
140
vendor/github.com/docker/docker/vendor.conf
generated
vendored
Normal file
@ -0,0 +1,140 @@
# the following lines are in sorted order, FYI
github.com/Azure/go-ansiterm 388960b655244e76e24c75f48631564eaefade62
github.com/Microsoft/hcsshim v0.5.9
github.com/Microsoft/go-winio v0.3.8
github.com/Sirupsen/logrus v0.11.0
github.com/davecgh/go-spew 6d212800a42e8ab5c146b8ace3490ee17e5225f9
github.com/docker/libtrust 9cbd2a1374f46905c68a4eb3694a130610adc62a
github.com/go-check/check 4ed411733c5785b40214c70bce814c3a3a689609 https://github.com/cpuguy83/check.git
github.com/gorilla/context v1.1
github.com/gorilla/mux v1.1
github.com/kr/pty 5cf931ef8f
github.com/mattn/go-shellwords v1.0.0
github.com/mattn/go-sqlite3 v1.1.0
github.com/tchap/go-patricia v2.2.6
github.com/vdemeester/shakers 24d7f1d6a71aa5d9cbe7390e4afb66b7eef9e1b3
# forked golang.org/x/net package includes a patch for lazy loading trace templates
golang.org/x/net 2beffdc2e92c8a3027590f898fe88f69af48a3f8 https://github.com/tonistiigi/net.git
golang.org/x/sys 8f0908ab3b2457e2e15403d3697c9ef5cb4b57a9
github.com/docker/go-units 8a7beacffa3009a9ac66bad506b18ffdd110cf97
github.com/docker/go-connections ecb4cb2dd420ada7df7f2593d6c25441f65f69f2

github.com/RackSec/srslog 456df3a81436d29ba874f3590eeeee25d666f8a5
github.com/imdario/mergo 0.2.1

#get libnetwork packages
github.com/docker/libnetwork 45b40861e677e37cf27bc184eca5af92f8cdd32d
github.com/docker/go-events 18b43f1bc85d9cdd42c05a6cd2d444c7a200a894
github.com/armon/go-radix e39d623f12e8e41c7b5529e9a9dd67a1e2261f80
github.com/armon/go-metrics eb0af217e5e9747e41dd5303755356b62d28e3ec
github.com/hashicorp/go-msgpack 71c2886f5a673a35f909803f38ece5810165097b
github.com/hashicorp/memberlist 88ac4de0d1a0ca6def284b571342db3b777a4c37
github.com/hashicorp/go-multierror fcdddc395df1ddf4247c69bd436e84cfa0733f7e
github.com/hashicorp/serf 598c54895cc5a7b1a24a398d635e8c0ea0959870
github.com/docker/libkv 1d8431073ae03cdaedb198a89722f3aab6d418ef
github.com/vishvananda/netns 604eaf189ee867d8c147fafc28def2394e878d25
github.com/vishvananda/netlink 482f7a52b758233521878cb6c5904b6bd63f3457
github.com/BurntSushi/toml f706d00e3de6abe700c994cdd545a1a4915af060
github.com/samuel/go-zookeeper d0e0d8e11f318e000a8cc434616d69e329edc374
github.com/deckarep/golang-set ef32fa3046d9f249d399f98ebaf9be944430fd1d
github.com/coreos/etcd 3a49cbb769ebd8d1dd25abb1e83386e9883a5707
github.com/ugorji/go f1f1a805ed361a0e078bb537e4ea78cd37dcf065
github.com/hashicorp/consul v0.5.2
github.com/boltdb/bolt fff57c100f4dea1905678da7e90d92429dff2904
github.com/miekg/dns 75e6e86cc601825c5dbcd4e0c209eab180997cd7

# get graph and distribution packages
github.com/docker/distribution 28602af35aceda2f8d571bad7ca37a54cf0250bc
github.com/vbatts/tar-split v0.10.1

# get go-zfs packages
github.com/mistifyio/go-zfs 22c9b32c84eb0d0c6f4043b6e90fc94073de92fa
github.com/pborman/uuid v1.0

# get desired notary commit, might also need to be updated in Dockerfile
github.com/docker/notary v0.4.2

google.golang.org/grpc v1.0.2
github.com/miekg/pkcs11 df8ae6ca730422dba20c768ff38ef7d79077a59f
github.com/docker/go v1.5.1-1-1-gbaf439e
github.com/agl/ed25519 d2b94fd789ea21d12fac1a4443dd3a3f79cda72c

# When updating, also update RUNC_COMMIT in hack/dockerfile/binaries-commits accordingly
github.com/opencontainers/runc 9df8b306d01f59d3a8029be411de015b7304dd8f https://github.com/docker/runc.git # libcontainer
github.com/opencontainers/runtime-spec 1c7c27d043c2a5e513a44084d2b10d77d1402b8c # specs
github.com/seccomp/libseccomp-golang 32f571b70023028bd57d9288c20efbcb237f3ce0
# libcontainer deps (see src/github.com/opencontainers/runc/Godeps/Godeps.json)
github.com/coreos/go-systemd v4
github.com/godbus/dbus v4.0.0
github.com/syndtr/gocapability 2c00daeb6c3b45114c80ac44119e7b8801fdd852
github.com/golang/protobuf 1f49d83d9aa00e6ce4fc8258c71cc7786aec968a

# gelf logging driver deps
github.com/Graylog2/go-gelf aab2f594e4585d43468ac57287b0dece9d806883

github.com/fluent/fluent-logger-golang v1.2.1
# fluent-logger-golang deps
github.com/philhofer/fwd 899e4efba8eaa1fea74175308f3fae18ff3319fa
github.com/tinylib/msgp 75ee40d2601edf122ef667e2a07d600d4c44490c

# fsnotify
github.com/fsnotify/fsnotify v1.2.11

# awslogs deps
github.com/aws/aws-sdk-go v1.4.22
github.com/go-ini/ini 060d7da055ba6ec5ea7a31f116332fe5efa04ce0
github.com/jmespath/go-jmespath 0b12d6b521d83fc7f755e7cfc1b1fbdd35a01a74

# logentries
github.com/bsphere/le_go d3308aafe090956bc89a65f0769f58251a1b4f03

# gcplogs deps
golang.org/x/oauth2 2baa8a1b9338cf13d9eeb27696d761155fa480be
google.golang.org/api dc6d2353af16e2a2b0ff6986af051d473a4ed468
google.golang.org/cloud dae7e3d993bc3812a2185af60552bb6b847e52a0

# native credentials
github.com/docker/docker-credential-helpers f72c04f1d8e71959a6d103f808c50ccbad79b9fd

# containerd
github.com/docker/containerd aa8187dbd3b7ad67d8e5e3a15115d3eef43a7ed1
github.com/tonistiigi/fifo 1405643975692217d6720f8b54aeee1bf2cd5cf4

# cluster
github.com/docker/swarmkit 1c7f003d75f091d5f7051ed982594420e4515f77
github.com/golang/mock bd3c8e81be01eef76d4b503f5e687d2d1354d2d9
github.com/gogo/protobuf v0.3
github.com/cloudflare/cfssl 7fb22c8cba7ecaf98e4082d22d65800cf45e042a
github.com/google/certificate-transparency d90e65c3a07988180c5b1ece71791c0b6506826e
golang.org/x/crypto 3fbbcd23f1cb824e69491a5930cfeff09b12f4d2
golang.org/x/time a4bde12657593d5e90d0533a3e4fd95e635124cb
github.com/mreiferson/go-httpclient 63fe23f7434723dc904c901043af07931f293c47
github.com/hashicorp/go-memdb 608dda3b1410a73eaf3ac8b517c9ae7ebab6aa87
github.com/hashicorp/go-immutable-radix 8e8ed81f8f0bf1bdd829593fdd5c29922c1ea990
github.com/hashicorp/golang-lru a0d98a5f288019575c6d1f4bb1573fef2d1fcdc4
github.com/coreos/pkg fa29b1d70f0beaddd4c7021607cc3c3be8ce94b8
github.com/pivotal-golang/clock 3fd3c1944c59d9742e1cd333672181cd1a6f9fa0
github.com/prometheus/client_golang 52437c81da6b127a9925d17eb3a382a2e5fd395e
github.com/beorn7/perks 4c0e84591b9aa9e6dcfdf3e020114cd81f89d5f9
github.com/prometheus/client_model fa8ad6fec33561be4280a8f0514318c79d7f6cb6
github.com/prometheus/common ebdfc6da46522d58825777cf1f90490a5b1ef1d8
github.com/prometheus/procfs abf152e5f3e97f2fafac028d2cc06c1feb87ffa5
bitbucket.org/ww/goautoneg 75cd24fc2f2c2a2088577d12123ddee5f54e0675
github.com/matttproud/golang_protobuf_extensions fc2b8d3a73c4867e51861bbdd5ae3c1f0869dd6a
github.com/pkg/errors 839d9e913e063e28dfd0e6c7b7512793e0a48be9

# cli
github.com/spf13/cobra v1.5 https://github.com/dnephin/cobra.git
github.com/spf13/pflag dabebe21bf790f782ea4c7bbd2efc430de182afd
github.com/inconshreveable/mousetrap 76626ae9c91c4f2a10f34cad8ce83ea42c93bb75
github.com/flynn-archive/go-shlex 3f9db97f856818214da2e1057f8ad84803971cff

# metrics
github.com/docker/go-metrics 86138d05f285fd9737a99bee2d9be30866b59d72

# composefile
github.com/mitchellh/mapstructure f3009df150dadf309fdee4a54ed65c124afad715
github.com/xeipuuv/gojsonpointer e0fe6f68307607d540ed8eac07a342c33fa1b54a
github.com/xeipuuv/gojsonreference e02fc20de94c78484cd5ffb007f8af96be030a45
github.com/xeipuuv/gojsonschema 93e72a773fade158921402d6a24c819b48aba29d
gopkg.in/yaml.v2 a83829b6f1293c91addabc89d0571c246397bbf4
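
The file above also shows the plain-text entry format that `vndr` reads from `vendor.conf`: one dependency per line, with an import path, a revision (tag or commit), and an optional alternate repository URL, plus `#` comments and blank lines for grouping. A minimal hypothetical sketch of that format (the packages and revisions below are placeholders, not taken from this change):

```
# <import path> <revision (tag or commit)> [alternate repository URL]
github.com/example/foo v1.2.3
github.com/example/bar 0123456789abcdef0123456789abcdef01234567
github.com/example/baz v0.1.0 https://github.com/fork/baz.git
```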
24
vendor/github.com/docker/go-events/.gitignore
generated
vendored
24
vendor/github.com/docker/go-events/.gitignore
generated
vendored
@ -1,24 +0,0 @@
# Compiled Object files, Static and Dynamic libs (Shared Objects)
*.o
*.a
*.so

# Folders
_obj
_test

# Architecture specific extensions/prefixes
*.[568vq]
[568vq].out

*.cgo1.go
*.cgo2.c
_cgo_defun.c
_cgo_gotypes.go
_cgo_export.*

_testmain.go

*.exe
*.test
*.prof
70
vendor/github.com/docker/go-events/CONTRIBUTING.md
generated
vendored
70
vendor/github.com/docker/go-events/CONTRIBUTING.md
generated
vendored
@ -1,70 +0,0 @@
|
||||
# Contributing to Docker open source projects
|
||||
|
||||
Want to hack on go-events? Awesome! Here are instructions to get you started.
|
||||
|
||||
go-events is part of the [Docker](https://www.docker.com) project, and
|
||||
follows the same rules and principles. If you're already familiar with the way
|
||||
Docker does things, you'll feel right at home.
|
||||
|
||||
Otherwise, go read Docker's
|
||||
[contributions guidelines](https://github.com/docker/docker/blob/master/CONTRIBUTING.md),
|
||||
[issue triaging](https://github.com/docker/docker/blob/master/project/ISSUE-TRIAGE.md),
|
||||
[review process](https://github.com/docker/docker/blob/master/project/REVIEWING.md) and
|
||||
[branches and tags](https://github.com/docker/docker/blob/master/project/BRANCHES-AND-TAGS.md).
|
||||
|
||||
For an in-depth description of our contribution process, visit the
|
||||
contributors guide: [Understand how to contribute](https://docs.docker.com/opensource/workflow/make-a-contribution/)
|
||||
|
||||
### Sign your work
|
||||
|
||||
The sign-off is a simple line at the end of the explanation for the patch. Your
|
||||
signature certifies that you wrote the patch or otherwise have the right to pass
|
||||
it on as an open-source patch. The rules are pretty simple: if you can certify
|
||||
the below (from [developercertificate.org](http://developercertificate.org/)):
|
||||
|
||||
```
|
||||
Developer Certificate of Origin
|
||||
Version 1.1
|
||||
|
||||
Copyright (C) 2004, 2006 The Linux Foundation and its contributors.
|
||||
660 York Street, Suite 102,
|
||||
San Francisco, CA 94110 USA
|
||||
|
||||
Everyone is permitted to copy and distribute verbatim copies of this
|
||||
license document, but changing it is not allowed.
|
||||
|
||||
Developer's Certificate of Origin 1.1
|
||||
|
||||
By making a contribution to this project, I certify that:
|
||||
|
||||
(a) The contribution was created in whole or in part by me and I
|
||||
have the right to submit it under the open source license
|
||||
indicated in the file; or
|
||||
|
||||
(b) The contribution is based upon previous work that, to the best
|
||||
of my knowledge, is covered under an appropriate open source
|
||||
license and I have the right under that license to submit that
|
||||
work with modifications, whether created in whole or in part
|
||||
by me, under the same open source license (unless I am
|
||||
permitted to submit under a different license), as indicated
|
||||
in the file; or
|
||||
|
||||
(c) The contribution was provided directly to me by some other
|
||||
person who certified (a), (b) or (c) and I have not modified
|
||||
it.
|
||||
|
||||
(d) I understand and agree that this project and the contribution
|
||||
are public and that a record of the contribution (including all
|
||||
personal information I submit with it, including my sign-off) is
|
||||
maintained indefinitely and may be redistributed consistent with
|
||||
this project or the open source license(s) involved.
|
||||
```
|
||||
|
||||
Then you just add a line to every git commit message:
|
||||
|
||||
Signed-off-by: Joe Smith <joe.smith@email.com>
|
||||
|
||||
Use your real name (sorry, no pseudonyms or anonymous contributions.)
|
||||
|
||||
If you set your `user.name` and `user.email` git configs, you can sign your
|
||||
commit automatically with `git commit -s`.
|
46
vendor/github.com/docker/go-events/MAINTAINERS
generated
vendored
46
vendor/github.com/docker/go-events/MAINTAINERS
generated
vendored
@ -1,46 +0,0 @@
|
||||
# go-events maintainers file
|
||||
#
|
||||
# This file describes who runs the docker/go-events project and how.
|
||||
# This is a living document - if you see something out of date or missing, speak up!
|
||||
#
|
||||
# It is structured to be consumable by both humans and programs.
|
||||
# To extract its contents programmatically, use any TOML-compliant parser.
|
||||
#
|
||||
# This file is compiled into the MAINTAINERS file in docker/opensource.
|
||||
#
|
||||
[Org]
|
||||
[Org."Core maintainers"]
|
||||
people = [
|
||||
"aaronlehmann",
|
||||
"aluzzardi",
|
||||
"lk4d4",
|
||||
"stevvooe",
|
||||
]
|
||||
|
||||
[people]
|
||||
|
||||
# A reference list of all people associated with the project.
|
||||
# All other sections should refer to people by their canonical key
|
||||
# in the people section.
|
||||
|
||||
# ADD YOURSELF HERE IN ALPHABETICAL ORDER
|
||||
|
||||
[people.aaronlehmann]
|
||||
Name = "Aaron Lehmann"
|
||||
Email = "aaron.lehmann@docker.com"
|
||||
GitHub = "aaronlehmann"
|
||||
|
||||
[people.aluzzardi]
|
||||
Name = "Andrea Luzzardi"
|
||||
Email = "al@docker.com"
|
||||
GitHub = "aluzzardi"
|
||||
|
||||
[people.lk4d4]
|
||||
Name = "Alexander Morozov"
|
||||
Email = "lk4d4@docker.com"
|
||||
GitHub = "lk4d4"
|
||||
|
||||
[people.stevvooe]
|
||||
Name = "Stephen Day"
|
||||
Email = "stephen.day@docker.com"
|
||||
GitHub = "stevvooe"
|
5
vendor/github.com/fsnotify/fsnotify/.editorconfig
generated
vendored
5
vendor/github.com/fsnotify/fsnotify/.editorconfig
generated
vendored
@ -1,5 +0,0 @@
root = true

[*]
indent_style = tab
indent_size = 4
6
vendor/github.com/fsnotify/fsnotify/.gitignore
generated
vendored
6
vendor/github.com/fsnotify/fsnotify/.gitignore
generated
vendored
@ -1,6 +0,0 @@
# Setup a Global .gitignore for OS and editor generated files:
# https://help.github.com/articles/ignoring-files
# git config --global core.excludesfile ~/.gitignore_global

.vagrant
*.sublime-project
30
vendor/github.com/fsnotify/fsnotify/.travis.yml
generated
vendored
30
vendor/github.com/fsnotify/fsnotify/.travis.yml
generated
vendored
@ -1,30 +0,0 @@
|
||||
sudo: false
|
||||
language: go
|
||||
|
||||
go:
|
||||
- 1.8
|
||||
- 1.7.x
|
||||
- tip
|
||||
|
||||
matrix:
|
||||
allow_failures:
|
||||
- go: tip
|
||||
fast_finish: true
|
||||
|
||||
before_script:
|
||||
- go get -u github.com/golang/lint/golint
|
||||
|
||||
script:
|
||||
- go test -v --race ./...
|
||||
|
||||
after_script:
|
||||
- test -z "$(gofmt -s -l -w . | tee /dev/stderr)"
|
||||
- test -z "$(golint ./... | tee /dev/stderr)"
|
||||
- go vet ./...
|
||||
|
||||
os:
|
||||
- linux
|
||||
- osx
|
||||
|
||||
notifications:
|
||||
email: false
|
46
vendor/github.com/fsnotify/fsnotify/AUTHORS
generated
vendored
46
vendor/github.com/fsnotify/fsnotify/AUTHORS
generated
vendored
@ -1,46 +0,0 @@
|
||||
# Names should be added to this file as
|
||||
# Name or Organization <email address>
|
||||
# The email address is not required for organizations.
|
||||
|
||||
# You can update this list using the following command:
|
||||
#
|
||||
# $ git shortlog -se | awk '{print $2 " " $3 " " $4}'
|
||||
|
||||
# Please keep the list sorted.
|
||||
|
||||
Adrien Bustany <adrien@bustany.org>
|
||||
Amit Krishnan <amit.krishnan@oracle.com>
|
||||
Bjørn Erik Pedersen <bjorn.erik.pedersen@gmail.com>
|
||||
Bruno Bigras <bigras.bruno@gmail.com>
|
||||
Caleb Spare <cespare@gmail.com>
|
||||
Case Nelson <case@teammating.com>
|
||||
Chris Howey <chris@howey.me> <howeyc@gmail.com>
|
||||
Christoffer Buchholz <christoffer.buchholz@gmail.com>
|
||||
Daniel Wagner-Hall <dawagner@gmail.com>
|
||||
Dave Cheney <dave@cheney.net>
|
||||
Evan Phoenix <evan@fallingsnow.net>
|
||||
Francisco Souza <f@souza.cc>
|
||||
Hari haran <hariharan.uno@gmail.com>
|
||||
John C Barstow
|
||||
Kelvin Fo <vmirage@gmail.com>
|
||||
Ken-ichirou MATSUZAWA <chamas@h4.dion.ne.jp>
|
||||
Matt Layher <mdlayher@gmail.com>
|
||||
Nathan Youngman <git@nathany.com>
|
||||
Patrick <patrick@dropbox.com>
|
||||
Paul Hammond <paul@paulhammond.org>
|
||||
Pawel Knap <pawelknap88@gmail.com>
|
||||
Pieter Droogendijk <pieter@binky.org.uk>
|
||||
Pursuit92 <JoshChase@techpursuit.net>
|
||||
Riku Voipio <riku.voipio@linaro.org>
|
||||
Rob Figueiredo <robfig@gmail.com>
|
||||
Slawek Ligus <root@ooz.ie>
|
||||
Soge Zhang <zhssoge@gmail.com>
|
||||
Tiffany Jernigan <tiffany.jernigan@intel.com>
|
||||
Tilak Sharma <tilaks@google.com>
|
||||
Travis Cline <travis.cline@gmail.com>
|
||||
Tudor Golubenco <tudor.g@gmail.com>
|
||||
Yukang <moorekang@gmail.com>
|
||||
bronze1man <bronze1man@gmail.com>
|
||||
debrando <denis.brandolini@gmail.com>
|
||||
henrikedwards <henrik.edwards@gmail.com>
|
||||
铁哥 <guotie.9@gmail.com>
|
307
vendor/github.com/fsnotify/fsnotify/CHANGELOG.md
generated
vendored
307
vendor/github.com/fsnotify/fsnotify/CHANGELOG.md
generated
vendored
@ -1,307 +0,0 @@
|
||||
# Changelog
|
||||
|
||||
## v1.4.2 / 2016-10-10
|
||||
|
||||
* Linux: use InotifyInit1 with IN_CLOEXEC to stop leaking a file descriptor to a child process when using fork/exec [#178](https://github.com/fsnotify/fsnotify/pull/178) (thanks @pattyshack)
|
||||
|
||||
## v1.4.1 / 2016-10-04
|
||||
|
||||
* Fix flaky inotify stress test on Linux [#177](https://github.com/fsnotify/fsnotify/pull/177) (thanks @pattyshack)
|
||||
|
||||
## v1.4.0 / 2016-10-01
|
||||
|
||||
* add a String() method to Event.Op [#165](https://github.com/fsnotify/fsnotify/pull/165) (thanks @oozie)
|
||||
|
||||
## v1.3.1 / 2016-06-28
|
||||
|
||||
* Windows: fix for double backslash when watching the root of a drive [#151](https://github.com/fsnotify/fsnotify/issues/151) (thanks @brunoqc)
|
||||
|
||||
## v1.3.0 / 2016-04-19
|
||||
|
||||
* Support linux/arm64 by [patching](https://go-review.googlesource.com/#/c/21971/) x/sys/unix and switching to to it from syscall (thanks @suihkulokki) [#135](https://github.com/fsnotify/fsnotify/pull/135)
|
||||
|
||||
## v1.2.10 / 2016-03-02
|
||||
|
||||
* Fix golint errors in windows.go [#121](https://github.com/fsnotify/fsnotify/pull/121) (thanks @tiffanyfj)
|
||||
|
||||
## v1.2.9 / 2016-01-13
|
||||
|
||||
kqueue: Fix logic for CREATE after REMOVE [#111](https://github.com/fsnotify/fsnotify/pull/111) (thanks @bep)
|
||||
|
||||
## v1.2.8 / 2015-12-17
|
||||
|
||||
* kqueue: fix race condition in Close [#105](https://github.com/fsnotify/fsnotify/pull/105) (thanks @djui for reporting the issue and @ppknap for writing a failing test)
|
||||
* inotify: fix race in test
|
||||
* enable race detection for continuous integration (Linux, Mac, Windows)
|
||||
|
||||
## v1.2.5 / 2015-10-17
|
||||
|
||||
* inotify: use epoll_create1 for arm64 support (requires Linux 2.6.27 or later) [#100](https://github.com/fsnotify/fsnotify/pull/100) (thanks @suihkulokki)
|
||||
* inotify: fix path leaks [#73](https://github.com/fsnotify/fsnotify/pull/73) (thanks @chamaken)
|
||||
* kqueue: watch for rename events on subdirectories [#83](https://github.com/fsnotify/fsnotify/pull/83) (thanks @guotie)
|
||||
* kqueue: avoid infinite loops from symlinks cycles [#101](https://github.com/fsnotify/fsnotify/pull/101) (thanks @illicitonion)
|
||||
|
||||
## v1.2.1 / 2015-10-14
|
||||
|
||||
* kqueue: don't watch named pipes [#98](https://github.com/fsnotify/fsnotify/pull/98) (thanks @evanphx)
|
||||
|
||||
## v1.2.0 / 2015-02-08
|
||||
|
||||
* inotify: use epoll to wake up readEvents [#66](https://github.com/fsnotify/fsnotify/pull/66) (thanks @PieterD)
|
||||
* inotify: closing watcher should now always shut down goroutine [#63](https://github.com/fsnotify/fsnotify/pull/63) (thanks @PieterD)
|
||||
* kqueue: close kqueue after removing watches, fixes [#59](https://github.com/fsnotify/fsnotify/issues/59)
|
||||
|
||||
## v1.1.1 / 2015-02-05
|
||||
|
||||
* inotify: Retry read on EINTR [#61](https://github.com/fsnotify/fsnotify/issues/61) (thanks @PieterD)
|
||||
|
||||
## v1.1.0 / 2014-12-12
|
||||
|
||||
* kqueue: rework internals [#43](https://github.com/fsnotify/fsnotify/pull/43)
|
||||
* add low-level functions
|
||||
* only need to store flags on directories
|
||||
* less mutexes [#13](https://github.com/fsnotify/fsnotify/issues/13)
|
||||
* done can be an unbuffered channel
|
||||
* remove calls to os.NewSyscallError
|
||||
* More efficient string concatenation for Event.String() [#52](https://github.com/fsnotify/fsnotify/pull/52) (thanks @mdlayher)
|
||||
* kqueue: fix regression in rework causing subdirectories to be watched [#48](https://github.com/fsnotify/fsnotify/issues/48)
|
||||
* kqueue: cleanup internal watch before sending remove event [#51](https://github.com/fsnotify/fsnotify/issues/51)
|
||||
|
||||
## v1.0.4 / 2014-09-07
|
||||
|
||||
* kqueue: add dragonfly to the build tags.
|
||||
* Rename source code files, rearrange code so exported APIs are at the top.
|
||||
* Add done channel to example code. [#37](https://github.com/fsnotify/fsnotify/pull/37) (thanks @chenyukang)
|
||||
|
||||
## v1.0.3 / 2014-08-19
|
||||
|
||||
* [Fix] Windows MOVED_TO now translates to Create like on BSD and Linux. [#36](https://github.com/fsnotify/fsnotify/issues/36)
|
||||
|
||||
## v1.0.2 / 2014-08-17
|
||||
|
||||
* [Fix] Missing create events on macOS. [#14](https://github.com/fsnotify/fsnotify/issues/14) (thanks @zhsso)
|
||||
* [Fix] Make ./path and path equivalent. (thanks @zhsso)
|
||||
|
||||
## v1.0.0 / 2014-08-15
|
||||
|
||||
* [API] Remove AddWatch on Windows, use Add.
|
||||
* Improve documentation for exported identifiers. [#30](https://github.com/fsnotify/fsnotify/issues/30)
|
||||
* Minor updates based on feedback from golint.
|
||||
|
||||
## dev / 2014-07-09
|
||||
|
||||
* Moved to [github.com/fsnotify/fsnotify](https://github.com/fsnotify/fsnotify).
|
||||
* Use os.NewSyscallError instead of returning errno (thanks @hariharan-uno)
|
||||
|
||||
## dev / 2014-07-04
|
||||
|
||||
* kqueue: fix incorrect mutex used in Close()
|
||||
* Update example to demonstrate usage of Op.
|
||||
|
||||
## dev / 2014-06-28
|
||||
|
||||
* [API] Don't set the Write Op for attribute notifications [#4](https://github.com/fsnotify/fsnotify/issues/4)
|
||||
* Fix for String() method on Event (thanks Alex Brainman)
|
||||
* Don't build on Plan 9 or Solaris (thanks @4ad)
|
||||
|
||||
## dev / 2014-06-21
|
||||
|
||||
* Events channel of type Event rather than *Event.
|
||||
* [internal] use syscall constants directly for inotify and kqueue.
|
||||
* [internal] kqueue: rename events to kevents and fileEvent to event.
|
||||
|
||||
## dev / 2014-06-19
|
||||
|
||||
* Go 1.3+ required on Windows (uses syscall.ERROR_MORE_DATA internally).
|
||||
* [internal] remove cookie from Event struct (unused).
|
||||
* [internal] Event struct has the same definition across every OS.
|
||||
* [internal] remove internal watch and removeWatch methods.
|
||||
|
||||
## dev / 2014-06-12
|
||||
|
||||
* [API] Renamed Watch() to Add() and RemoveWatch() to Remove().
|
||||
* [API] Pluralized channel names: Events and Errors.
|
||||
* [API] Renamed FileEvent struct to Event.
|
||||
* [API] Op constants replace methods like IsCreate().
|
||||
|
||||
## dev / 2014-06-12
|
||||
|
||||
* Fix data race on kevent buffer (thanks @tilaks) [#98](https://github.com/howeyc/fsnotify/pull/98)
|
||||
|
||||
## dev / 2014-05-23
|
||||
|
||||
* [API] Remove current implementation of WatchFlags.
|
||||
* current implementation doesn't take advantage of OS for efficiency
|
||||
* provides little benefit over filtering events as they are received, but has extra bookkeeping and mutexes
|
||||
* no tests for the current implementation
|
||||
* not fully implemented on Windows [#93](https://github.com/howeyc/fsnotify/issues/93#issuecomment-39285195)
|
||||
|
||||
## v0.9.3 / 2014-12-31
|
||||
|
||||
* kqueue: cleanup internal watch before sending remove event [#51](https://github.com/fsnotify/fsnotify/issues/51)
|
||||
|
||||
## v0.9.2 / 2014-08-17
|
||||
|
||||
* [Backport] Fix missing create events on macOS. [#14](https://github.com/fsnotify/fsnotify/issues/14) (thanks @zhsso)
|
||||
|
||||
## v0.9.1 / 2014-06-12
|
||||
|
||||
* Fix data race on kevent buffer (thanks @tilaks) [#98](https://github.com/howeyc/fsnotify/pull/98)
|
||||
|
||||
## v0.9.0 / 2014-01-17
|
||||
|
||||
* IsAttrib() for events that only concern a file's metadata [#79][] (thanks @abustany)
|
||||
* [Fix] kqueue: fix deadlock [#77][] (thanks @cespare)
|
||||
* [NOTICE] Development has moved to `code.google.com/p/go.exp/fsnotify` in preparation for inclusion in the Go standard library.
|
||||
|
||||
## v0.8.12 / 2013-11-13
|
||||
|
||||
* [API] Remove FD_SET and friends from Linux adapter
|
||||
|
||||
## v0.8.11 / 2013-11-02
|
||||
|
||||
* [Doc] Add Changelog [#72][] (thanks @nathany)
|
||||
* [Doc] Spotlight and double modify events on macOS [#62][] (reported by @paulhammond)
|
||||
|
||||
## v0.8.10 / 2013-10-19
|
||||
|
||||
* [Fix] kqueue: remove file watches when parent directory is removed [#71][] (reported by @mdwhatcott)
|
||||
* [Fix] kqueue: race between Close and readEvents [#70][] (reported by @bernerdschaefer)
|
||||
* [Doc] specify OS-specific limits in README (thanks @debrando)
|
||||
|
||||
## v0.8.9 / 2013-09-08
|
||||
|
||||
* [Doc] Contributing (thanks @nathany)
|
||||
* [Doc] update package path in example code [#63][] (thanks @paulhammond)
|
||||
* [Doc] GoCI badge in README (Linux only) [#60][]
|
||||
* [Doc] Cross-platform testing with Vagrant [#59][] (thanks @nathany)
|
||||
|
||||
## v0.8.8 / 2013-06-17
|
||||
|
||||
* [Fix] Windows: handle `ERROR_MORE_DATA` on Windows [#49][] (thanks @jbowtie)
|
||||
|
||||
## v0.8.7 / 2013-06-03
|
||||
|
||||
* [API] Make syscall flags internal
|
||||
* [Fix] inotify: ignore event changes
|
||||
* [Fix] race in symlink test [#45][] (reported by @srid)
|
||||
* [Fix] tests on Windows
|
||||
* lower case error messages
|
||||
|
||||
## v0.8.6 / 2013-05-23
|
||||
|
||||
* kqueue: Use EVT_ONLY flag on Darwin
|
||||
* [Doc] Update README with full example
|
||||
|
||||
## v0.8.5 / 2013-05-09
|
||||
|
||||
* [Fix] inotify: allow monitoring of "broken" symlinks (thanks @tsg)
|
||||
|
||||
## v0.8.4 / 2013-04-07
|
||||
|
||||
* [Fix] kqueue: watch all file events [#40][] (thanks @ChrisBuchholz)
|
||||
|
||||
## v0.8.3 / 2013-03-13
|
||||
|
||||
* [Fix] inoitfy/kqueue memory leak [#36][] (reported by @nbkolchin)
|
||||
* [Fix] kqueue: use fsnFlags for watching a directory [#33][] (reported by @nbkolchin)
|
||||
|
||||
## v0.8.2 / 2013-02-07
|
||||
|
||||
* [Doc] add Authors
|
||||
* [Fix] fix data races for map access [#29][] (thanks @fsouza)
|
||||
|
||||
## v0.8.1 / 2013-01-09
|
||||
|
||||
* [Fix] Windows path separators
|
||||
* [Doc] BSD License
|
||||
|
||||
## v0.8.0 / 2012-11-09
|
||||
|
||||
* kqueue: directory watching improvements (thanks @vmirage)
|
||||
* inotify: add `IN_MOVED_TO` [#25][] (requested by @cpisto)
|
||||
* [Fix] kqueue: deleting watched directory [#24][] (reported by @jakerr)
|
||||
|
||||
## v0.7.4 / 2012-10-09
|
||||
|
||||
* [Fix] inotify: fixes from https://codereview.appspot.com/5418045/ (ugorji)
|
||||
* [Fix] kqueue: preserve watch flags when watching for delete [#21][] (reported by @robfig)
|
||||
* [Fix] kqueue: watch the directory even if it isn't a new watch (thanks @robfig)
|
||||
* [Fix] kqueue: modify after recreation of file
|
||||
|
||||
## v0.7.3 / 2012-09-27
|
||||
|
||||
* [Fix] kqueue: watch with an existing folder inside the watched folder (thanks @vmirage)
|
||||
* [Fix] kqueue: no longer get duplicate CREATE events
|
||||
|
||||
## v0.7.2 / 2012-09-01
|
||||
|
||||
* kqueue: events for created directories
|
||||
|
||||
## v0.7.1 / 2012-07-14
|
||||
|
||||
* [Fix] for renaming files
|
||||
|
||||
## v0.7.0 / 2012-07-02
|
||||
|
||||
* [Feature] FSNotify flags
|
||||
* [Fix] inotify: Added file name back to event path
|
||||
|
||||
## v0.6.0 / 2012-06-06
|
||||
|
||||
* kqueue: watch files after directory created (thanks @tmc)
|
||||
|
||||
## v0.5.1 / 2012-05-22
|
||||
|
||||
* [Fix] inotify: remove all watches before Close()
|
||||
|
||||
## v0.5.0 / 2012-05-03
|
||||
|
||||
* [API] kqueue: return errors during watch instead of sending over channel
|
||||
* kqueue: match symlink behavior on Linux
|
||||
* inotify: add `DELETE_SELF` (requested by @taralx)
|
||||
* [Fix] kqueue: handle EINTR (reported by @robfig)
|
||||
* [Doc] Godoc example [#1][] (thanks @davecheney)
|
||||
|
||||
## v0.4.0 / 2012-03-30
|
||||
|
||||
* Go 1 released: build with go tool
|
||||
* [Feature] Windows support using winfsnotify
|
||||
* Windows does not have attribute change notifications
|
||||
* Roll attribute notifications into IsModify
|
||||
|
||||
## v0.3.0 / 2012-02-19
|
||||
|
||||
* kqueue: add files when watch directory
|
||||
|
||||
## v0.2.0 / 2011-12-30
|
||||
|
||||
* update to latest Go weekly code
|
||||
|
||||
## v0.1.0 / 2011-10-19
|
||||
|
||||
* kqueue: add watch on file creation to match inotify
|
||||
* kqueue: create file event
|
||||
* inotify: ignore `IN_IGNORED` events
|
||||
* event String()
|
||||
* linux: common FileEvent functions
|
||||
* initial commit
|
||||
|
||||
[#79]: https://github.com/howeyc/fsnotify/pull/79
|
||||
[#77]: https://github.com/howeyc/fsnotify/pull/77
|
||||
[#72]: https://github.com/howeyc/fsnotify/issues/72
|
||||
[#71]: https://github.com/howeyc/fsnotify/issues/71
|
||||
[#70]: https://github.com/howeyc/fsnotify/issues/70
|
||||
[#63]: https://github.com/howeyc/fsnotify/issues/63
|
||||
[#62]: https://github.com/howeyc/fsnotify/issues/62
|
||||
[#60]: https://github.com/howeyc/fsnotify/issues/60
|
||||
[#59]: https://github.com/howeyc/fsnotify/issues/59
|
||||
[#49]: https://github.com/howeyc/fsnotify/issues/49
|
||||
[#45]: https://github.com/howeyc/fsnotify/issues/45
|
||||
[#40]: https://github.com/howeyc/fsnotify/issues/40
|
||||
[#36]: https://github.com/howeyc/fsnotify/issues/36
|
||||
[#33]: https://github.com/howeyc/fsnotify/issues/33
|
||||
[#29]: https://github.com/howeyc/fsnotify/issues/29
|
||||
[#25]: https://github.com/howeyc/fsnotify/issues/25
|
||||
[#24]: https://github.com/howeyc/fsnotify/issues/24
|
||||
[#21]: https://github.com/howeyc/fsnotify/issues/21
|
77
vendor/github.com/fsnotify/fsnotify/CONTRIBUTING.md
generated
vendored
77
vendor/github.com/fsnotify/fsnotify/CONTRIBUTING.md
generated
vendored
@ -1,77 +0,0 @@
|
||||
# Contributing
|
||||
|
||||
## Issues
|
||||
|
||||
* Request features and report bugs using the [GitHub Issue Tracker](https://github.com/fsnotify/fsnotify/issues).
|
||||
* Please indicate the platform you are using fsnotify on.
|
||||
* A code example to reproduce the problem is appreciated.
|
||||
|
||||
## Pull Requests
|
||||
|
||||
### Contributor License Agreement
|
||||
|
||||
fsnotify is derived from code in the [golang.org/x/exp](https://godoc.org/golang.org/x/exp) package and it may be included [in the standard library](https://github.com/fsnotify/fsnotify/issues/1) in the future. Therefore fsnotify carries the same [LICENSE](https://github.com/fsnotify/fsnotify/blob/master/LICENSE) as Go. Contributors retain their copyright, so you need to fill out a short form before we can accept your contribution: [Google Individual Contributor License Agreement](https://developers.google.com/open-source/cla/individual).
|
||||
|
||||
Please indicate that you have signed the CLA in your pull request.
|
||||
|
||||
### How fsnotify is Developed
|
||||
|
||||
* Development is done on feature branches.
|
||||
* Tests are run on BSD, Linux, macOS and Windows.
|
||||
* Pull requests are reviewed and [applied to master][am] using [hub][].
|
||||
* Maintainers may modify or squash commits rather than asking contributors to.
|
||||
* To issue a new release, the maintainers will:
|
||||
* Update the CHANGELOG
|
||||
* Tag a version, which will become available through gopkg.in.
|
||||
|
||||
### How to Fork
|
||||
|
||||
For smooth sailing, always use the original import path. Installing with `go get` makes this easy.
|
||||
|
||||
1. Install from GitHub (`go get -u github.com/fsnotify/fsnotify`)
|
||||
2. Create your feature branch (`git checkout -b my-new-feature`)
|
||||
3. Ensure everything works and the tests pass (see below)
|
||||
4. Commit your changes (`git commit -am 'Add some feature'`)
|
||||
|
||||
Contribute upstream:
|
||||
|
||||
1. Fork fsnotify on GitHub
|
||||
2. Add your remote (`git remote add fork git@github.com:mycompany/repo.git`)
|
||||
3. Push to the branch (`git push fork my-new-feature`)
|
||||
4. Create a new Pull Request on GitHub
|
||||
|
||||
This workflow is [thoroughly explained by Katrina Owen](https://splice.com/blog/contributing-open-source-git-repositories-go/).
|
||||
|
||||
### Testing
|
||||
|
||||
fsnotify uses build tags to compile different code on Linux, BSD, macOS, and Windows.
|
||||
|
||||
Before doing a pull request, please do your best to test your changes on multiple platforms, and list which platforms you were able/unable to test on.
|
||||
|
||||
To aid in cross-platform testing there is a Vagrantfile for Linux and BSD.
|
||||
|
||||
* Install [Vagrant](http://www.vagrantup.com/) and [VirtualBox](https://www.virtualbox.org/)
|
||||
* Setup [Vagrant Gopher](https://github.com/nathany/vagrant-gopher) in your `src` folder.
|
||||
* Run `vagrant up` from the project folder. You can also setup just one box with `vagrant up linux` or `vagrant up bsd` (note: the BSD box doesn't support Windows hosts at this time, and NFS may prompt for your host OS password)
|
||||
* Once setup, you can run the test suite on a given OS with a single command `vagrant ssh linux -c 'cd fsnotify/fsnotify; go test'`.
|
||||
* When you're done, you will want to halt or destroy the Vagrant boxes.
|
||||
|
||||
Notice: fsnotify file system events won't trigger in shared folders. The tests get around this limitation by using the /tmp directory.
|
||||
|
||||
Right now there is no equivalent solution for Windows and macOS, but there are Windows VMs [freely available from Microsoft](http://www.modern.ie/en-us/virtualization-tools#downloads).
|
||||
|
||||
### Maintainers
|
||||
|
||||
Help maintaining fsnotify is welcome. To be a maintainer:
|
||||
|
||||
* Submit a pull request and sign the CLA as above.
|
||||
* You must be able to run the test suite on Mac, Windows, Linux and BSD.
|
||||
|
||||
To keep master clean, the fsnotify project uses the "apply mail" workflow outlined in Nathaniel Talbott's post ["Merge pull request" Considered Harmful][am]. This requires installing [hub][].
|
||||
|
||||
All code changes should be internal pull requests.
|
||||
|
||||
Releases are tagged using [Semantic Versioning](http://semver.org/).
|
||||
|
||||
[hub]: https://github.com/github/hub
|
||||
[am]: http://blog.spreedly.com/2014/06/24/merge-pull-request-considered-harmful/#.VGa5yZPF_Zs
|
14
vendor/github.com/gogo/protobuf/AUTHORS
generated
vendored
14
vendor/github.com/gogo/protobuf/AUTHORS
generated
vendored
@ -1,14 +0,0 @@
|
||||
# This is the official list of GoGo authors for copyright purposes.
|
||||
# This file is distinct from the CONTRIBUTORS file, which
|
||||
# lists people. For example, employees are listed in CONTRIBUTORS,
|
||||
# but not in AUTHORS, because the employer holds the copyright.
|
||||
|
||||
# Names should be added to this file as one of
|
||||
# Organization's name
|
||||
# Individual's name <submission email address>
|
||||
# Individual's name <submission email address> <email2> <emailN>
|
||||
|
||||
# Please keep the list sorted.
|
||||
|
||||
Vastech SA (PTY) LTD
|
||||
Walter Schulze <awalterschulze@gmail.com>
|
18
vendor/github.com/gogo/protobuf/CONTRIBUTORS
generated
vendored
18
vendor/github.com/gogo/protobuf/CONTRIBUTORS
generated
vendored
@ -1,18 +0,0 @@
|
||||
Anton Povarov <anton.povarov@gmail.com>
|
||||
Clayton Coleman <ccoleman@redhat.com>
|
||||
Denis Smirnov <denis.smirnov.91@gmail.com>
|
||||
DongYun Kang <ceram1000@gmail.com>
|
||||
Dwayne Schultz <dschultz@pivotal.io>
|
||||
Georg Apitz <gapitz@pivotal.io>
|
||||
Gustav Paul <gustav.paul@gmail.com>
|
||||
Johan Brandhorst <johan.brandhorst@gmail.com>
|
||||
John Shahid <jvshahid@gmail.com>
|
||||
John Tuley <john@tuley.org>
|
||||
Laurent <laurent@adyoulike.com>
|
||||
Patrick Lee <patrick@dropbox.com>
|
||||
Sergio Arbeo <serabe@gmail.com>
|
||||
Stephen J Day <stephen.day@docker.com>
|
||||
Tamir Duberstein <tamird@gmail.com>
|
||||
Todd Eisenberger <teisenberger@dropbox.com>
|
||||
Tormod Erevik Lea <tormodlea@gmail.com>
|
||||
Walter Schulze <awalterschulze@gmail.com>
|
258
vendor/github.com/gogo/protobuf/README
generated
vendored
Normal file
258
vendor/github.com/gogo/protobuf/README
generated
vendored
Normal file
@ -0,0 +1,258 @@
|
||||
GoGoProtobuf http://github.com/gogo/protobuf extends
|
||||
GoProtobuf http://github.com/golang/protobuf
|
||||
|
||||
# Go support for Protocol Buffers
|
||||
|
||||
Google's data interchange format.
|
||||
Copyright 2010 The Go Authors.
|
||||
https://github.com/golang/protobuf
|
||||
|
||||
This package and the code it generates requires at least Go 1.4.
|
||||
|
||||
This software implements Go bindings for protocol buffers. For
|
||||
information about protocol buffers themselves, see
|
||||
https://developers.google.com/protocol-buffers/
|
||||
|
||||
## Installation ##
|
||||
|
||||
To use this software, you must:
|
||||
- Install the standard C++ implementation of protocol buffers from
|
||||
https://developers.google.com/protocol-buffers/
|
||||
- Of course, install the Go compiler and tools from
|
||||
https://golang.org/
|
||||
See
|
||||
https://golang.org/doc/install
|
||||
for details or, if you are using gccgo, follow the instructions at
|
||||
https://golang.org/doc/install/gccgo
|
||||
- Grab the code from the repository and install the proto package.
|
||||
The simplest way is to run `go get -u github.com/golang/protobuf/{proto,protoc-gen-go}`.
|
||||
The compiler plugin, protoc-gen-go, will be installed in $GOBIN,
|
||||
defaulting to $GOPATH/bin. It must be in your $PATH for the protocol
|
||||
compiler, protoc, to find it.
|
||||
|
||||
This software has two parts: a 'protocol compiler plugin' that
|
||||
generates Go source files that, once compiled, can access and manage
|
||||
protocol buffers; and a library that implements run-time support for
|
||||
encoding (marshaling), decoding (unmarshaling), and accessing protocol
|
||||
buffers.
|
||||
|
||||
There is support for gRPC in Go using protocol buffers.
|
||||
See the note at the bottom of this file for details.
|
||||
|
||||
There are no insertion points in the plugin.
|
||||
|
||||
GoGoProtobuf provides extensions for protocol buffers and GoProtobuf
|
||||
see http://github.com/gogo/protobuf/gogoproto/doc.go
|
||||
|
||||
## Using protocol buffers with Go ##
|
||||
|
||||
Once the software is installed, there are two steps to using it.
|
||||
First you must compile the protocol buffer definitions and then import
|
||||
them, with the support library, into your program.
|
||||
|
||||
To compile the protocol buffer definition, run protoc with the --gogo_out
|
||||
parameter set to the directory you want to output the Go code to.
|
||||
|
||||
protoc --gogo_out=. *.proto
|
||||
|
||||
The generated files will be suffixed .pb.go. See the Test code below
|
||||
for an example using such a file.
|
||||
|
||||
The package comment for the proto library contains text describing
|
||||
the interface provided in Go for protocol buffers. Here is an edited
|
||||
version.
|
||||
|
||||
If you are using any gogo.proto extensions you will need to specify the
|
||||
proto_path to include the descriptor.proto and gogo.proto.
|
||||
gogo.proto is located in github.com/gogo/protobuf/gogoproto
|
||||
This should be fine, since your import is the same.
|
||||
descriptor.proto is located in either github.com/gogo/protobuf/protobuf
|
||||
or code.google.com/p/protobuf/trunk/src/
|
||||
Its import is google/protobuf/descriptor.proto so it might need some help.
|
||||
|
||||
protoc --gogo_out=. -I=.:github.com/gogo/protobuf/protobuf *.proto
|
||||
|
||||
==========
|
||||
|
||||
The proto package converts data structures to and from the
|
||||
wire format of protocol buffers. It works in concert with the
|
||||
Go source code generated for .proto files by the protocol compiler.
|
||||
|
||||
A summary of the properties of the protocol buffer interface
|
||||
for a protocol buffer variable v:
|
||||
|
||||
- Names are turned from camel_case to CamelCase for export.
|
||||
- There are no methods on v to set fields; just treat
|
||||
them as structure fields.
|
||||
- There are getters that return a field's value if set,
|
||||
and return the field's default value if unset.
|
||||
The getters work even if the receiver is a nil message.
|
||||
- The zero value for a struct is its correct initialization state.
|
||||
All desired fields must be set before marshaling.
|
||||
- A Reset() method will restore a protobuf struct to its zero state.
|
||||
- Non-repeated fields are pointers to the values; nil means unset.
|
||||
That is, optional or required field int32 f becomes F *int32.
|
||||
- Repeated fields are slices.
|
||||
- Helper functions are available to aid the setting of fields.
|
||||
Helpers for getting values are superseded by the
|
||||
GetFoo methods and their use is deprecated.
|
||||
msg.Foo = proto.String("hello") // set field
|
||||
- Constants are defined to hold the default values of all fields that
|
||||
have them. They have the form Default_StructName_FieldName.
|
||||
Because the getter methods handle defaulted values,
|
||||
direct use of these constants should be rare.
|
||||
- Enums are given type names and maps from names to values.
|
||||
Enum values are prefixed with the enum's type name. Enum types have
|
||||
a String method, and a Enum method to assist in message construction.
|
||||
- Nested groups and enums have type names prefixed with the name of
|
||||
the surrounding message type.
|
||||
- Extensions are given descriptor names that start with E_,
|
||||
followed by an underscore-delimited list of the nested messages
|
||||
that contain it (if any) followed by the CamelCased name of the
|
||||
extension field itself. HasExtension, ClearExtension, GetExtension
|
||||
and SetExtension are functions for manipulating extensions.
|
||||
- Oneof field sets are given a single field in their message,
|
||||
with distinguished wrapper types for each possible field value.
|
||||
- Marshal and Unmarshal are functions to encode and decode the wire format.
|
||||
|
||||
When the .proto file specifies `syntax="proto3"`, there are some differences:
|
||||
|
||||
- Non-repeated fields of non-message type are values instead of pointers.
|
||||
- Getters are only generated for message and oneof fields.
|
||||
- Enum types do not get an Enum method.
|
||||
|
||||
Consider file test.proto, containing
|
||||
|
||||
```proto
|
||||
package example;
|
||||
|
||||
enum FOO { X = 17; };
|
||||
|
||||
message Test {
|
||||
required string label = 1;
|
||||
optional int32 type = 2 [default=77];
|
||||
repeated int64 reps = 3;
|
||||
optional group OptionalGroup = 4 {
|
||||
required string RequiredField = 5;
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
To create and play with a Test object from the example package,
|
||||
|
||||
```go
|
||||
package main
|
||||
|
||||
import (
|
||||
"log"
|
||||
|
||||
"github.com/gogo/protobuf/proto"
|
||||
"path/to/example"
|
||||
)
|
||||
|
||||
func main() {
|
||||
test := &example.Test {
|
||||
Label: proto.String("hello"),
|
||||
Type: proto.Int32(17),
|
||||
Reps: []int64{1, 2, 3},
|
||||
Optionalgroup: &example.Test_OptionalGroup {
|
||||
RequiredField: proto.String("good bye"),
|
||||
},
|
||||
}
|
||||
data, err := proto.Marshal(test)
|
||||
if err != nil {
|
||||
log.Fatal("marshaling error: ", err)
|
||||
}
|
||||
newTest := &example.Test{}
|
||||
err = proto.Unmarshal(data, newTest)
|
||||
if err != nil {
|
||||
log.Fatal("unmarshaling error: ", err)
|
||||
}
|
||||
// Now test and newTest contain the same data.
|
||||
if test.GetLabel() != newTest.GetLabel() {
|
||||
log.Fatalf("data mismatch %q != %q", test.GetLabel(), newTest.GetLabel())
|
||||
}
|
||||
// etc.
|
||||
}
|
||||
```
|
||||
|
||||
|
||||
## Parameters ##
|
||||
|
||||
To pass extra parameters to the plugin, use a comma-separated
|
||||
parameter list separated from the output directory by a colon:
|
||||
|
||||
|
||||
protoc --gogo_out=plugins=grpc,import_path=mypackage:. *.proto
|
||||
|
||||
|
||||
- `import_prefix=xxx` - a prefix that is added onto the beginning of
|
||||
all imports. Useful for things like generating protos in a
|
||||
subdirectory, or regenerating vendored protobufs in-place.
|
||||
- `import_path=foo/bar` - used as the package if no input files
|
||||
declare `go_package`. If it contains slashes, everything up to the
|
||||
rightmost slash is ignored.
|
||||
- `plugins=plugin1+plugin2` - specifies the list of sub-plugins to
|
||||
load. The only plugin in this repo is `grpc`.
|
||||
- `Mfoo/bar.proto=quux/shme` - declares that foo/bar.proto is
|
||||
associated with Go package quux/shme. This is subject to the
|
||||
import_prefix parameter.
|
||||
|
||||
## gRPC Support ##
|
||||
|
||||
If a proto file specifies RPC services, protoc-gen-go can be instructed to
|
||||
generate code compatible with gRPC (http://www.grpc.io/). To do this, pass
|
||||
the `plugins` parameter to protoc-gen-go; the usual way is to insert it into
|
||||
the --go_out argument to protoc:
|
||||
|
||||
protoc --gogo_out=plugins=grpc:. *.proto
|
||||
|
||||
## Compatibility ##
|
||||
|
||||
The library and the generated code are expected to be stable over time.
|
||||
However, we reserve the right to make breaking changes without notice for the
|
||||
following reasons:
|
||||
|
||||
- Security. A security issue in the specification or implementation may come to
|
||||
light whose resolution requires breaking compatibility. We reserve the right
|
||||
to address such security issues.
|
||||
- Unspecified behavior. There are some aspects of the Protocol Buffers
|
||||
specification that are undefined. Programs that depend on such unspecified
|
||||
behavior may break in future releases.
|
||||
- Specification errors or changes. If it becomes necessary to address an
|
||||
inconsistency, incompleteness, or change in the Protocol Buffers
|
||||
specification, resolving the issue could affect the meaning or legality of
|
||||
existing programs. We reserve the right to address such issues, including
|
||||
updating the implementations.
|
||||
- Bugs. If the library has a bug that violates the specification, a program
|
||||
that depends on the buggy behavior may break if the bug is fixed. We reserve
|
||||
the right to fix such bugs.
|
||||
- Adding methods or fields to generated structs. These may conflict with field
|
||||
names that already exist in a schema, causing applications to break. When the
|
||||
code generator encounters a field in the schema that would collide with a
|
||||
generated field or method name, the code generator will append an underscore
|
||||
to the generated field or method name.
|
||||
- Adding, removing, or changing methods or fields in generated structs that
|
||||
start with `XXX`. These parts of the generated code are exported out of
|
||||
necessity, but should not be considered part of the public API.
|
||||
- Adding, removing, or changing unexported symbols in generated code.
|
||||
|
||||
Any breaking changes outside of these will be announced 6 months in advance to
|
||||
protobuf@googlegroups.com.
|
||||
|
||||
You should, whenever possible, use generated code created by the `protoc-gen-go`
|
||||
tool built at the same commit as the `proto` package. The `proto` package
|
||||
declares package-level constants in the form `ProtoPackageIsVersionX`.
|
||||
Application code and generated code may depend on one of these constants to
|
||||
ensure that compilation will fail if the available version of the proto library
|
||||
is too old. Whenever we make a change to the generated code that requires newer
|
||||
library support, in the same commit we will increment the version number of the
|
||||
generated code and declare a new package-level constant whose name incorporates
|
||||
the latest version number. Removing a compatibility constant is considered a
|
||||
breaking change and would be subject to the announcement policy stated above.
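
As a minimal sketch of what this looks like in generated code (the specific constant name `ProtoPackageIsVersion2` is an assumption here, following the `ProtoPackageIsVersionX` pattern described above):

```go
package example

import "github.com/gogo/protobuf/proto"

// Compile-time assertion: this file fails to build if the linked proto
// library is older than the version the generated code was built against.
const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package
```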
|
||||
|
||||
## Plugins ##
|
||||
|
||||
The `protoc-gen-go/generator` package exposes a plugin interface,
|
||||
which is used by the gRPC code generation. This interface is not
|
||||
supported and is subject to incompatible changes without notice.
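
For orientation, the hook is an interface of roughly the following shape; since the interface is unsupported, treat the exact method set and signatures below as an approximation rather than a contract:

```go
// Approximate sketch of the sub-plugin hook exposed by protoc-gen-go/generator.
type Plugin interface {
	// Name identifies the sub-plugin (e.g. "grpc").
	Name() string
	// Init is called once, before code generation begins.
	Init(g *Generator)
	// Generate emits the plugin's code for one .proto file.
	Generate(file *FileDescriptor)
	// GenerateImports emits any import statements the generated code needs.
	GenerateImports(file *FileDescriptor)
}
```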
|
117
vendor/github.com/gogo/protobuf/Readme.md
generated
vendored
Normal file
@ -0,0 +1,117 @@
|
||||
# Protocol Buffers for Go with Gadgets
|
||||
|
||||
[Build Status](https://travis-ci.org/gogo/protobuf)
|
||||
|
||||
gogoprotobuf is a fork of <a href="https://github.com/golang/protobuf">golang/protobuf</a> with extra code generation features.
|
||||
|
||||
This code generation is used to achieve:
|
||||
|
||||
- fast marshalling and unmarshalling
|
||||
- more canonical Go structures
|
||||
- goprotobuf compatibility
|
||||
- less typing by optionally generating extra helper code
|
||||
- peace of mind by optionally generating test and benchmark code
|
||||
- other serialization formats
|
||||
|
||||
Keeping track of how up to date gogoprotobuf is relative to golang/protobuf is done in this
|
||||
<a href="https://github.com/gogo/protobuf/issues/191">issue</a>
|
||||
|
||||
## Users
|
||||
|
||||
These projects use gogoprotobuf:
|
||||
|
||||
- <a href="http://godoc.org/github.com/coreos/etcd">etcd</a> - <a href="https://blog.gopheracademy.com/advent-2015/etcd-distributed-key-value-store-with-grpc-http2/">blog</a> - <a href="https://github.com/coreos/etcd/blob/master/etcdserver/etcdserverpb/etcdserver.proto">sample proto file</a>
|
||||
- <a href="https://www.spacemonkey.com/">spacemonkey</a> - <a href="https://www.spacemonkey.com/blog/posts/go-space-monkey">blog</a>
|
||||
- <a href="http://badoo.com">badoo</a> - <a href="https://github.com/badoo/lsd/blob/32061f501c5eca9c76c596d790b450501ba27b2f/proto/lsd.proto">sample proto file</a>
|
||||
- <a href="https://github.com/mesos/mesos-go">mesos-go</a> - <a href="https://github.com/mesos/mesos-go/blob/master/mesosproto/mesos.proto">sample proto file</a>
|
||||
- <a href="https://github.com/mozilla-services/heka">heka</a> - <a href="https://github.com/mozilla-services/heka/commit/eb72fbf7d2d28249fbaf8d8dc6607f4eb6f03351">the switch from golang/protobuf to gogo/protobuf when it was still on code.google.com</a>
|
||||
- <a href="https://github.com/cockroachdb/cockroach">cockroachdb</a> - <a href="https://github.com/cockroachdb/cockroach/blob/651d54d393e391a30154e9117ab4b18d9ee6d845/roachpb/metadata.proto">sample proto file</a>
|
||||
- <a href="https://github.com/jbenet/go-ipfs">go-ipfs</a> - <a href="https://github.com/ipfs/go-ipfs/blob/2b6da0c024f28abeb16947fb452787196a6b56a2/merkledag/pb/merkledag.proto">sample proto file</a>
|
||||
- <a href="https://github.com/philhofer/rkive">rkive-go</a> - <a href="https://github.com/philhofer/rkive/blob/e5dd884d3ea07b341321073882ae28aa16dd11be/rpbc/riak_dt.proto">sample proto file</a>
|
||||
- <a href="https://www.dropbox.com">dropbox</a>
|
||||
- <a href="https://srclib.org/">srclib</a> - <a href="https://github.com/sourcegraph/srclib/blob/6538858f0c410cac5c63440317b8d009e889d3fb/graph/def.proto">sample proto file</a>
|
||||
- <a href="http://www.adyoulike.com/">adyoulike</a>
|
||||
- <a href="http://www.cloudfoundry.org/">cloudfoundry</a> - <a href="https://github.com/cloudfoundry/bbs/blob/d673710b8c4211037805129944ee4c5373d6588a/models/events.proto">sample proto file</a>
|
||||
- <a href="http://kubernetes.io/">kubernetes</a> - <a href="https://github.com/kubernetes/kubernetes/tree/88d8628137f94ee816aaa6606ae8cd045dee0bff/cmd/libs/go2idl">go2idl built on top of gogoprotobuf</a>
|
||||
- <a href="https://dgraph.io/">dgraph</a> - <a href="https://github.com/dgraph-io/dgraph/releases/tag/v0.4.3">release notes</a> - <a href="https://discuss.dgraph.io/t/gogoprotobuf-is-extremely-fast/639">benchmarks</a></a>
|
||||
- <a href="https://github.com/centrifugal/centrifugo">centrifugo</a> - <a href="https://forum.golangbridge.org/t/centrifugo-real-time-messaging-websocket-or-sockjs-server-v1-5-0-released/2861">release notes</a> - <a href="https://medium.com/@fzambia/centrifugo-protobuf-inside-json-outside-21d39bdabd68#.o3icmgjqd">blog</a>
|
||||
- <a href="https://github.com/docker/swarmkit">docker swarmkit</a> - <a href="https://github.com/docker/swarmkit/blob/63600e01af3b8da2a0ed1c9fa6e1ae4299d75edb/api/objects.proto">sample proto file</a>
|
||||
- <a href="https://nats.io/">nats.io</a> - <a href="https://github.com/nats-io/go-nats-streaming/blob/master/pb/protocol.proto">go-nats-streaming</a>
|
||||
- <a href="https://github.com/pingcap/tidb">tidb</a> - Communication between <a href="https://github.com/pingcap/tipb/blob/master/generate-go.sh#L4">tidb</a> and <a href="https://github.com/pingcap/kvproto/blob/master/generate_go.sh#L3">tikv</a>
|
||||
- <a href="https://github.com/AsynkronIT/protoactor-go">protoactor-go</a> - <a href="https://github.com/AsynkronIT/protoactor-go/blob/dev/protobuf/protoc-gen-protoactor/main.go">vanity command</a> that also generates actors from service definitions
|
||||
|
||||
Please let us know if you are using gogoprotobuf by posting on our <a href="https://groups.google.com/forum/#!topic/gogoprotobuf/Brw76BxmFpQ">GoogleGroup</a>.
|
||||
|
||||
### Mentioned
|
||||
|
||||
- <a href="http://www.slideshare.net/albertstrasheim/serialization-in-go">Cloudflare - go serialization talk - Albert Strasheim</a>
|
||||
- <a href="http://gophercon.sourcegraph.com/post/83747547505/writing-a-high-performance-database-in-go">gophercon</a>
|
||||
- <a href="https://github.com/alecthomas/go_serialization_benchmarks">alecthomas' go serialization benchmarks</a>
|
||||
|
||||
## Getting Started
|
||||
|
||||
There are several ways to use gogoprotobuf, but for all of them you need to install Go and protoc.
|
||||
After that you can choose:
|
||||
|
||||
- Speed
|
||||
- More Speed and more generated code
|
||||
- Most Speed and most customization
|
||||
|
||||
### Installation
|
||||
|
||||
To install it, you must first have Go (at least version 1.3.3) installed (see [http://golang.org/doc/install](http://golang.org/doc/install)). Go 1.7.1 is continuously tested.
|
||||
|
||||
Next, install the standard protocol buffer implementation from [https://github.com/google/protobuf](https://github.com/google/protobuf).
|
||||
Most versions from 2.3.1 should not give any problems, but 2.6.1, 3.0.2 and 3.1.0 are continuously tested.
|
||||
|
||||
### Speed
|
||||
|
||||
Install the protoc-gen-gofast binary
|
||||
|
||||
go get github.com/gogo/protobuf/protoc-gen-gofast
|
||||
|
||||
Use it to generate faster marshaling and unmarshaling Go code for your protocol buffers.
|
||||
|
||||
protoc --gofast_out=. myproto.proto
|
||||
|
||||
This does not allow you to use any of the other gogoprotobuf [extensions](https://github.com/gogo/protobuf/blob/master/extensions.md).
|
||||
|
||||
### More Speed and more generated code
|
||||
|
||||
Fields without pointers reduce the time spent in the garbage collector.
|
||||
More code generation results in more convenient methods.
|
||||
|
||||
Other binaries are also included:
|
||||
|
||||
protoc-gen-gogofast (same as gofast, but imports gogoprotobuf)
|
||||
protoc-gen-gogofaster (same as gogofast, without XXX_unrecognized, less pointer fields)
|
||||
protoc-gen-gogoslick (same as gogofaster, but with generated string, gostring and equal methods)
|
||||
|
||||
Installing any of these binaries is easy. Simply run:
|
||||
|
||||
go get github.com/gogo/protobuf/proto
|
||||
go get github.com/gogo/protobuf/{binary}
|
||||
go get github.com/gogo/protobuf/gogoproto
|
||||
|
||||
These binaries allow you to use gogoprotobuf [extensions](https://github.com/gogo/protobuf/blob/master/extensions.md).
|
||||
|
||||
### Most Speed and most customization
|
||||
|
||||
Customizing the fields of your messages to be the types you actually want to use removes the need to copy between the structs you work with and the structs you serialize.
|
||||
gogoprotobuf also offers more serialization formats and generation of tests and even more methods.
|
||||
|
||||
Please visit the [extensions](https://github.com/gogo/protobuf/blob/master/extensions.md) page for more documentation.
|
||||
|
||||
Install protoc-gen-gogo:
|
||||
|
||||
go get github.com/gogo/protobuf/proto
|
||||
go get github.com/gogo/protobuf/jsonpb
|
||||
go get github.com/gogo/protobuf/protoc-gen-gogo
|
||||
go get github.com/gogo/protobuf/gogoproto
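
With those installed, a typical invocation looks like the command below; the include paths are only an example and depend on where the gogo `.proto` files live in your `GOPATH`:

    protoc --proto_path=.:$GOPATH/src/github.com/gogo/protobuf/protobuf --gogo_out=. my.proto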
|
||||
|
||||
## GRPC
|
||||
|
||||
It works the same as with golang/protobuf; simply specify the plugin.
|
||||
Here is an example using gofast:
|
||||
|
||||
protoc --gofast_out=plugins=grpc:. my.proto
|
37
vendor/github.com/gogo/protobuf/gogoproto/Makefile
generated
vendored
@ -1,37 +0,0 @@
|
||||
# Protocol Buffers for Go with Gadgets
|
||||
#
|
||||
# Copyright (c) 2013, The GoGo Authors. All rights reserved.
|
||||
# http://github.com/gogo/protobuf
|
||||
#
|
||||
# Redistribution and use in source and binary forms, with or without
|
||||
# modification, are permitted provided that the following conditions are
|
||||
# met:
|
||||
#
|
||||
# * Redistributions of source code must retain the above copyright
|
||||
# notice, this list of conditions and the following disclaimer.
|
||||
# * Redistributions in binary form must reproduce the above
|
||||
# copyright notice, this list of conditions and the following disclaimer
|
||||
# in the documentation and/or other materials provided with the
|
||||
# distribution.
|
||||
#
|
||||
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
|
||||
regenerate:
|
||||
go install github.com/gogo/protobuf/protoc-gen-gogo
|
||||
protoc --gogo_out=Mgoogle/protobuf/descriptor.proto=github.com/gogo/protobuf/protoc-gen-gogo/descriptor:. --proto_path=../../../../:../protobuf/:. *.proto
|
||||
|
||||
restore:
|
||||
cp gogo.pb.golden gogo.pb.go
|
||||
|
||||
preserve:
|
||||
cp gogo.pb.go gogo.pb.golden
|
45
vendor/github.com/gogo/protobuf/gogoproto/gogo.pb.golden
generated
vendored
@ -1,45 +0,0 @@
|
||||
// Code generated by protoc-gen-go.
|
||||
// source: gogo.proto
|
||||
// DO NOT EDIT!
|
||||
|
||||
package gogoproto
|
||||
|
||||
import proto "github.com/gogo/protobuf/proto"
|
||||
import json "encoding/json"
|
||||
import math "math"
|
||||
import google_protobuf "github.com/gogo/protobuf/protoc-gen-gogo/descriptor"
|
||||
|
||||
// Reference proto, json, and math imports to suppress error if they are not otherwise used.
|
||||
var _ = proto.Marshal
|
||||
var _ = &json.SyntaxError{}
|
||||
var _ = math.Inf
|
||||
|
||||
var E_Nullable = &proto.ExtensionDesc{
|
||||
ExtendedType: (*google_protobuf.FieldOptions)(nil),
|
||||
ExtensionType: (*bool)(nil),
|
||||
Field: 51235,
|
||||
Name: "gogoproto.nullable",
|
||||
Tag: "varint,51235,opt,name=nullable",
|
||||
}
|
||||
|
||||
var E_Embed = &proto.ExtensionDesc{
|
||||
ExtendedType: (*google_protobuf.FieldOptions)(nil),
|
||||
ExtensionType: (*bool)(nil),
|
||||
Field: 51236,
|
||||
Name: "gogoproto.embed",
|
||||
Tag: "varint,51236,opt,name=embed",
|
||||
}
|
||||
|
||||
var E_Customtype = &proto.ExtensionDesc{
|
||||
ExtendedType: (*google_protobuf.FieldOptions)(nil),
|
||||
ExtensionType: (*string)(nil),
|
||||
Field: 51237,
|
||||
Name: "gogoproto.customtype",
|
||||
Tag: "bytes,51237,opt,name=customtype",
|
||||
}
|
||||
|
||||
func init() {
|
||||
proto.RegisterExtension(E_Nullable)
|
||||
proto.RegisterExtension(E_Embed)
|
||||
proto.RegisterExtension(E_Customtype)
|
||||
}
|
43
vendor/github.com/gogo/protobuf/proto/Makefile
generated
vendored
@ -1,43 +0,0 @@
|
||||
# Go support for Protocol Buffers - Google's data interchange format
|
||||
#
|
||||
# Copyright 2010 The Go Authors. All rights reserved.
|
||||
# https://github.com/golang/protobuf
|
||||
#
|
||||
# Redistribution and use in source and binary forms, with or without
|
||||
# modification, are permitted provided that the following conditions are
|
||||
# met:
|
||||
#
|
||||
# * Redistributions of source code must retain the above copyright
|
||||
# notice, this list of conditions and the following disclaimer.
|
||||
# * Redistributions in binary form must reproduce the above
|
||||
# copyright notice, this list of conditions and the following disclaimer
|
||||
# in the documentation and/or other materials provided with the
|
||||
# distribution.
|
||||
# * Neither the name of Google Inc. nor the names of its
|
||||
# contributors may be used to endorse or promote products derived from
|
||||
# this software without specific prior written permission.
|
||||
#
|
||||
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
|
||||
install:
|
||||
go install
|
||||
|
||||
test: install generate-test-pbs
|
||||
go test
|
||||
|
||||
|
||||
generate-test-pbs:
|
||||
make install
|
||||
make -C testdata
|
||||
protoc-min-version --version="3.0.0" --proto_path=.:../../../../:../protobuf --gogo_out=Mtestdata/test.proto=github.com/gogo/protobuf/proto/testdata,Mgoogle/protobuf/any.proto=github.com/gogo/protobuf/types:. proto3_proto/proto3.proto
|
||||
make
|
140
vendor/github.com/gogo/protobuf/protobuf/google/protobuf/any.proto
generated
vendored
Normal file
@ -0,0 +1,140 @@
|
||||
// Protocol Buffers - Google's data interchange format
|
||||
// Copyright 2008 Google Inc. All rights reserved.
|
||||
// https://developers.google.com/protocol-buffers/
|
||||
//
|
||||
// Redistribution and use in source and binary forms, with or without
|
||||
// modification, are permitted provided that the following conditions are
|
||||
// met:
|
||||
//
|
||||
// * Redistributions of source code must retain the above copyright
|
||||
// notice, this list of conditions and the following disclaimer.
|
||||
// * Redistributions in binary form must reproduce the above
|
||||
// copyright notice, this list of conditions and the following disclaimer
|
||||
// in the documentation and/or other materials provided with the
|
||||
// distribution.
|
||||
// * Neither the name of Google Inc. nor the names of its
|
||||
// contributors may be used to endorse or promote products derived from
|
||||
// this software without specific prior written permission.
|
||||
//
|
||||
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
|
||||
syntax = "proto3";
|
||||
|
||||
package google.protobuf;
|
||||
|
||||
option csharp_namespace = "Google.Protobuf.WellKnownTypes";
|
||||
option go_package = "types";
|
||||
option java_package = "com.google.protobuf";
|
||||
option java_outer_classname = "AnyProto";
|
||||
option java_multiple_files = true;
|
||||
option java_generate_equals_and_hash = true;
|
||||
option objc_class_prefix = "GPB";
|
||||
|
||||
// `Any` contains an arbitrary serialized protocol buffer message along with a
|
||||
// URL that describes the type of the serialized message.
|
||||
//
|
||||
// Protobuf library provides support to pack/unpack Any values in the form
|
||||
// of utility functions or additional generated methods of the Any type.
|
||||
//
|
||||
// Example 1: Pack and unpack a message in C++.
|
||||
//
|
||||
// Foo foo = ...;
|
||||
// Any any;
|
||||
// any.PackFrom(foo);
|
||||
// ...
|
||||
// if (any.UnpackTo(&foo)) {
|
||||
// ...
|
||||
// }
|
||||
//
|
||||
// Example 2: Pack and unpack a message in Java.
|
||||
//
|
||||
// Foo foo = ...;
|
||||
// Any any = Any.pack(foo);
|
||||
// ...
|
||||
// if (any.is(Foo.class)) {
|
||||
// foo = any.unpack(Foo.class);
|
||||
// }
|
||||
//
|
||||
// Example 3: Pack and unpack a message in Python.
|
||||
//
|
||||
// foo = Foo(...)
|
||||
// any = Any()
|
||||
// any.Pack(foo)
|
||||
// ...
|
||||
// if any.Is(Foo.DESCRIPTOR):
|
||||
// any.Unpack(foo)
|
||||
// ...
|
||||
//
|
||||
// The pack methods provided by protobuf library will by default use
|
||||
// 'type.googleapis.com/full.type.name' as the type URL and the unpack
|
||||
// methods only use the fully qualified type name after the last '/'
|
||||
// in the type URL, for example "foo.bar.com/x/y.z" will yield type
|
||||
// name "y.z".
|
||||
//
|
||||
//
|
||||
// JSON
|
||||
// ====
|
||||
// The JSON representation of an `Any` value uses the regular
|
||||
// representation of the deserialized, embedded message, with an
|
||||
// additional field `@type` which contains the type URL. Example:
|
||||
//
|
||||
// package google.profile;
|
||||
// message Person {
|
||||
// string first_name = 1;
|
||||
// string last_name = 2;
|
||||
// }
|
||||
//
|
||||
// {
|
||||
// "@type": "type.googleapis.com/google.profile.Person",
|
||||
// "firstName": <string>,
|
||||
// "lastName": <string>
|
||||
// }
|
||||
//
|
||||
// If the embedded message type is well-known and has a custom JSON
|
||||
// representation, that representation will be embedded adding a field
|
||||
// `value` which holds the custom JSON in addition to the `@type`
|
||||
// field. Example (for message [google.protobuf.Duration][]):
|
||||
//
|
||||
// {
|
||||
// "@type": "type.googleapis.com/google.protobuf.Duration",
|
||||
// "value": "1.212s"
|
||||
// }
|
||||
//
|
||||
message Any {
|
||||
// A URL/resource name whose content describes the type of the
|
||||
// serialized protocol buffer message.
|
||||
//
|
||||
// For URLs which use the scheme `http`, `https`, or no scheme, the
|
||||
// following restrictions and interpretations apply:
|
||||
//
|
||||
// * If no scheme is provided, `https` is assumed.
|
||||
// * The last segment of the URL's path must represent the fully
|
||||
// qualified name of the type (as in `path/google.protobuf.Duration`).
|
||||
// The name should be in a canonical form (e.g., leading "." is
|
||||
// not accepted).
|
||||
// * An HTTP GET on the URL must yield a [google.protobuf.Type][]
|
||||
// value in binary format, or produce an error.
|
||||
// * Applications are allowed to cache lookup results based on the
|
||||
// URL, or have them precompiled into a binary to avoid any
|
||||
// lookup. Therefore, binary compatibility needs to be preserved
|
||||
// on changes to types. (Use versioned type names to manage
|
||||
// breaking changes.)
|
||||
//
|
||||
// Schemes other than `http`, `https` (or the empty scheme) might be
|
||||
// used with implementation specific semantics.
|
||||
//
|
||||
string type_url = 1;
|
||||
|
||||
// Must be a valid serialized protocol buffer of the above specified type.
|
||||
bytes value = 2;
|
||||
}
|
150
vendor/github.com/gogo/protobuf/protobuf/google/protobuf/compiler/plugin.proto
generated
vendored
Normal file
@ -0,0 +1,150 @@
|
||||
// Protocol Buffers - Google's data interchange format
|
||||
// Copyright 2008 Google Inc. All rights reserved.
|
||||
// https://developers.google.com/protocol-buffers/
|
||||
//
|
||||
// Redistribution and use in source and binary forms, with or without
|
||||
// modification, are permitted provided that the following conditions are
|
||||
// met:
|
||||
//
|
||||
// * Redistributions of source code must retain the above copyright
|
||||
// notice, this list of conditions and the following disclaimer.
|
||||
// * Redistributions in binary form must reproduce the above
|
||||
// copyright notice, this list of conditions and the following disclaimer
|
||||
// in the documentation and/or other materials provided with the
|
||||
// distribution.
|
||||
// * Neither the name of Google Inc. nor the names of its
|
||||
// contributors may be used to endorse or promote products derived from
|
||||
// this software without specific prior written permission.
|
||||
//
|
||||
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
|
||||
// Author: kenton@google.com (Kenton Varda)
|
||||
//
|
||||
// WARNING: The plugin interface is currently EXPERIMENTAL and is subject to
|
||||
// change.
|
||||
//
|
||||
// protoc (aka the Protocol Compiler) can be extended via plugins. A plugin is
|
||||
// just a program that reads a CodeGeneratorRequest from stdin and writes a
|
||||
// CodeGeneratorResponse to stdout.
|
||||
//
|
||||
// Plugins written using C++ can use google/protobuf/compiler/plugin.h instead
|
||||
// of dealing with the raw protocol defined here.
|
||||
//
|
||||
// A plugin executable needs only to be placed somewhere in the path. The
|
||||
// plugin should be named "protoc-gen-$NAME", and will then be used when the
|
||||
// flag "--${NAME}_out" is passed to protoc.
|
||||
|
||||
syntax = "proto2";
|
||||
package google.protobuf.compiler;
|
||||
option java_package = "com.google.protobuf.compiler";
|
||||
option java_outer_classname = "PluginProtos";
|
||||
|
||||
option go_package = "plugin_go";
|
||||
|
||||
import "google/protobuf/descriptor.proto";
|
||||
|
||||
// An encoded CodeGeneratorRequest is written to the plugin's stdin.
|
||||
message CodeGeneratorRequest {
|
||||
// The .proto files that were explicitly listed on the command-line. The
|
||||
// code generator should generate code only for these files. Each file's
|
||||
// descriptor will be included in proto_file, below.
|
||||
repeated string file_to_generate = 1;
|
||||
|
||||
// The generator parameter passed on the command-line.
|
||||
optional string parameter = 2;
|
||||
|
||||
// FileDescriptorProtos for all files in files_to_generate and everything
|
||||
// they import. The files will appear in topological order, so each file
|
||||
// appears before any file that imports it.
|
||||
//
|
||||
// protoc guarantees that all proto_files will be written after
|
||||
// the fields above, even though this is not technically guaranteed by the
|
||||
// protobuf wire format. This theoretically could allow a plugin to stream
|
||||
// in the FileDescriptorProtos and handle them one by one rather than read
|
||||
// the entire set into memory at once. However, as of this writing, this
|
||||
// is not similarly optimized on protoc's end -- it will store all fields in
|
||||
// memory at once before sending them to the plugin.
|
||||
repeated FileDescriptorProto proto_file = 15;
|
||||
}
|
||||
|
||||
// The plugin writes an encoded CodeGeneratorResponse to stdout.
|
||||
message CodeGeneratorResponse {
|
||||
// Error message. If non-empty, code generation failed. The plugin process
|
||||
// should exit with status code zero even if it reports an error in this way.
|
||||
//
|
||||
// This should be used to indicate errors in .proto files which prevent the
|
||||
// code generator from generating correct code. Errors which indicate a
|
||||
// problem in protoc itself -- such as the input CodeGeneratorRequest being
|
||||
// unparseable -- should be reported by writing a message to stderr and
|
||||
// exiting with a non-zero status code.
|
||||
optional string error = 1;
|
||||
|
||||
// Represents a single generated file.
|
||||
message File {
|
||||
// The file name, relative to the output directory. The name must not
|
||||
// contain "." or ".." components and must be relative, not be absolute (so,
|
||||
// the file cannot lie outside the output directory). "/" must be used as
|
||||
// the path separator, not "\".
|
||||
//
|
||||
// If the name is omitted, the content will be appended to the previous
|
||||
// file. This allows the generator to break large files into small chunks,
|
||||
// and allows the generated text to be streamed back to protoc so that large
|
||||
// files need not reside completely in memory at one time. Note that as of
|
||||
// this writing protoc does not optimize for this -- it will read the entire
|
||||
// CodeGeneratorResponse before writing files to disk.
|
||||
optional string name = 1;
|
||||
|
||||
// If non-empty, indicates that the named file should already exist, and the
|
||||
// content here is to be inserted into that file at a defined insertion
|
||||
// point. This feature allows a code generator to extend the output
|
||||
// produced by another code generator. The original generator may provide
|
||||
// insertion points by placing special annotations in the file that look
|
||||
// like:
|
||||
// @@protoc_insertion_point(NAME)
|
||||
// The annotation can have arbitrary text before and after it on the line,
|
||||
// which allows it to be placed in a comment. NAME should be replaced with
|
||||
// an identifier naming the point -- this is what other generators will use
|
||||
// as the insertion_point. Code inserted at this point will be placed
|
||||
// immediately above the line containing the insertion point (thus multiple
|
||||
// insertions to the same point will come out in the order they were added).
|
||||
// The double-@ is intended to make it unlikely that the generated code
|
||||
// could contain things that look like insertion points by accident.
|
||||
//
|
||||
// For example, the C++ code generator places the following line in the
|
||||
// .pb.h files that it generates:
|
||||
// // @@protoc_insertion_point(namespace_scope)
|
||||
// This line appears within the scope of the file's package namespace, but
|
||||
// outside of any particular class. Another plugin can then specify the
|
||||
// insertion_point "namespace_scope" to generate additional classes or
|
||||
// other declarations that should be placed in this scope.
|
||||
//
|
||||
// Note that if the line containing the insertion point begins with
|
||||
// whitespace, the same whitespace will be added to every line of the
|
||||
// inserted text. This is useful for languages like Python, where
|
||||
// indentation matters. In these languages, the insertion point comment
|
||||
// should be indented the same amount as any inserted code will need to be
|
||||
// in order to work correctly in that context.
|
||||
//
|
||||
// The code generator that generates the initial file and the one which
|
||||
// inserts into it must both run as part of a single invocation of protoc.
|
||||
// Code generators are executed in the order in which they appear on the
|
||||
// command line.
|
||||
//
|
||||
// If |insertion_point| is present, |name| must also be present.
|
||||
optional string insertion_point = 2;
|
||||
|
||||
// The file contents.
|
||||
optional string content = 15;
|
||||
}
|
||||
repeated File file = 15;
|
||||
}
|
813
vendor/github.com/gogo/protobuf/protobuf/google/protobuf/descriptor.proto
generated
vendored
Normal file
@ -0,0 +1,813 @@
|
||||
// Protocol Buffers - Google's data interchange format
|
||||
// Copyright 2008 Google Inc. All rights reserved.
|
||||
// https://developers.google.com/protocol-buffers/
|
||||
//
|
||||
// Redistribution and use in source and binary forms, with or without
|
||||
// modification, are permitted provided that the following conditions are
|
||||
// met:
|
||||
//
|
||||
// * Redistributions of source code must retain the above copyright
|
||||
// notice, this list of conditions and the following disclaimer.
|
||||
// * Redistributions in binary form must reproduce the above
|
||||
// copyright notice, this list of conditions and the following disclaimer
|
||||
// in the documentation and/or other materials provided with the
|
||||
// distribution.
|
||||
// * Neither the name of Google Inc. nor the names of its
|
||||
// contributors may be used to endorse or promote products derived from
|
||||
// this software without specific prior written permission.
|
||||
//
|
||||
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
|
||||
// Author: kenton@google.com (Kenton Varda)
|
||||
// Based on original Protocol Buffers design by
|
||||
// Sanjay Ghemawat, Jeff Dean, and others.
|
||||
//
|
||||
// The messages in this file describe the definitions found in .proto files.
|
||||
// A valid .proto file can be translated directly to a FileDescriptorProto
|
||||
// without any other information (e.g. without reading its imports).
|
||||
|
||||
|
||||
syntax = "proto2";
|
||||
|
||||
package google.protobuf;
|
||||
option go_package = "descriptor";
|
||||
option java_package = "com.google.protobuf";
|
||||
option java_outer_classname = "DescriptorProtos";
|
||||
option csharp_namespace = "Google.Protobuf.Reflection";
|
||||
option objc_class_prefix = "GPB";
|
||||
option java_generate_equals_and_hash = true;
|
||||
|
||||
// descriptor.proto must be optimized for speed because reflection-based
|
||||
// algorithms don't work during bootstrapping.
|
||||
option optimize_for = SPEED;
|
||||
|
||||
// The protocol compiler can output a FileDescriptorSet containing the .proto
|
||||
// files it parses.
|
||||
message FileDescriptorSet {
|
||||
repeated FileDescriptorProto file = 1;
|
||||
}
|
||||
|
||||
// Describes a complete .proto file.
|
||||
message FileDescriptorProto {
|
||||
optional string name = 1; // file name, relative to root of source tree
|
||||
optional string package = 2; // e.g. "foo", "foo.bar", etc.
|
||||
|
||||
// Names of files imported by this file.
|
||||
repeated string dependency = 3;
|
||||
// Indexes of the public imported files in the dependency list above.
|
||||
repeated int32 public_dependency = 10;
|
||||
// Indexes of the weak imported files in the dependency list.
|
||||
// For Google-internal migration only. Do not use.
|
||||
repeated int32 weak_dependency = 11;
|
||||
|
||||
// All top-level definitions in this file.
|
||||
repeated DescriptorProto message_type = 4;
|
||||
repeated EnumDescriptorProto enum_type = 5;
|
||||
repeated ServiceDescriptorProto service = 6;
|
||||
repeated FieldDescriptorProto extension = 7;
|
||||
|
||||
optional FileOptions options = 8;
|
||||
|
||||
// This field contains optional information about the original source code.
|
||||
// You may safely remove this entire field without harming runtime
|
||||
// functionality of the descriptors -- the information is needed only by
|
||||
// development tools.
|
||||
optional SourceCodeInfo source_code_info = 9;
|
||||
|
||||
// The syntax of the proto file.
|
||||
// The supported values are "proto2" and "proto3".
|
||||
optional string syntax = 12;
|
||||
}
|
||||
|
||||
// Describes a message type.
|
||||
message DescriptorProto {
|
||||
optional string name = 1;
|
||||
|
||||
repeated FieldDescriptorProto field = 2;
|
||||
repeated FieldDescriptorProto extension = 6;
|
||||
|
||||
repeated DescriptorProto nested_type = 3;
|
||||
repeated EnumDescriptorProto enum_type = 4;
|
||||
|
||||
message ExtensionRange {
|
||||
optional int32 start = 1;
|
||||
optional int32 end = 2;
|
||||
}
|
||||
repeated ExtensionRange extension_range = 5;
|
||||
|
||||
repeated OneofDescriptorProto oneof_decl = 8;
|
||||
|
||||
optional MessageOptions options = 7;
|
||||
|
||||
// Range of reserved tag numbers. Reserved tag numbers may not be used by
|
||||
// fields or extension ranges in the same message. Reserved ranges may
|
||||
// not overlap.
|
||||
message ReservedRange {
|
||||
optional int32 start = 1; // Inclusive.
|
||||
optional int32 end = 2; // Exclusive.
|
||||
}
|
||||
repeated ReservedRange reserved_range = 9;
|
||||
// Reserved field names, which may not be used by fields in the same message.
|
||||
// A given name may only be reserved once.
|
||||
repeated string reserved_name = 10;
|
||||
}
|
||||
|
||||
// Describes a field within a message.
|
||||
message FieldDescriptorProto {
|
||||
enum Type {
|
||||
// 0 is reserved for errors.
|
||||
// Order is weird for historical reasons.
|
||||
TYPE_DOUBLE = 1;
|
||||
TYPE_FLOAT = 2;
|
||||
// Not ZigZag encoded. Negative numbers take 10 bytes. Use TYPE_SINT64 if
|
||||
// negative values are likely.
|
||||
TYPE_INT64 = 3;
|
||||
TYPE_UINT64 = 4;
|
||||
// Not ZigZag encoded. Negative numbers take 10 bytes. Use TYPE_SINT32 if
|
||||
// negative values are likely.
|
||||
TYPE_INT32 = 5;
|
||||
TYPE_FIXED64 = 6;
|
||||
TYPE_FIXED32 = 7;
|
||||
TYPE_BOOL = 8;
|
||||
TYPE_STRING = 9;
|
||||
TYPE_GROUP = 10; // Tag-delimited aggregate.
|
||||
TYPE_MESSAGE = 11; // Length-delimited aggregate.
|
||||
|
||||
// New in version 2.
|
||||
TYPE_BYTES = 12;
|
||||
TYPE_UINT32 = 13;
|
||||
TYPE_ENUM = 14;
|
||||
TYPE_SFIXED32 = 15;
|
||||
TYPE_SFIXED64 = 16;
|
||||
TYPE_SINT32 = 17; // Uses ZigZag encoding.
|
||||
TYPE_SINT64 = 18; // Uses ZigZag encoding.
|
||||
};
|
||||
|
||||
enum Label {
|
||||
// 0 is reserved for errors
|
||||
LABEL_OPTIONAL = 1;
|
||||
LABEL_REQUIRED = 2;
|
||||
LABEL_REPEATED = 3;
|
||||
// TODO(sanjay): Should we add LABEL_MAP?
|
||||
};
|
||||
|
||||
optional string name = 1;
|
||||
optional int32 number = 3;
|
||||
optional Label label = 4;
|
||||
|
||||
// If type_name is set, this need not be set. If both this and type_name
|
||||
// are set, this must be one of TYPE_ENUM, TYPE_MESSAGE or TYPE_GROUP.
|
||||
optional Type type = 5;
|
||||
|
||||
// For message and enum types, this is the name of the type. If the name
|
||||
// starts with a '.', it is fully-qualified. Otherwise, C++-like scoping
|
||||
// rules are used to find the type (i.e. first the nested types within this
|
||||
// message are searched, then within the parent, on up to the root
|
||||
// namespace).
|
||||
optional string type_name = 6;
|
||||
|
||||
// For extensions, this is the name of the type being extended. It is
|
||||
// resolved in the same manner as type_name.
|
||||
optional string extendee = 2;
|
||||
|
||||
// For numeric types, contains the original text representation of the value.
|
||||
// For booleans, "true" or "false".
|
||||
// For strings, contains the default text contents (not escaped in any way).
|
||||
// For bytes, contains the C escaped value. All bytes >= 128 are escaped.
|
||||
// TODO(kenton): Base-64 encode?
|
||||
optional string default_value = 7;
|
||||
|
||||
// If set, gives the index of a oneof in the containing type's oneof_decl
|
||||
// list. This field is a member of that oneof.
|
||||
optional int32 oneof_index = 9;
|
||||
|
||||
// JSON name of this field. The value is set by protocol compiler. If the
|
||||
// user has set a "json_name" option on this field, that option's value
|
||||
// will be used. Otherwise, it's deduced from the field's name by converting
|
||||
// it to camelCase.
|
||||
optional string json_name = 10;
|
||||
|
||||
optional FieldOptions options = 8;
|
||||
}
|
||||
|
||||
// Describes a oneof.
|
||||
message OneofDescriptorProto {
|
||||
optional string name = 1;
|
||||
optional OneofOptions options = 2;
|
||||
}
|
||||
|
||||
// Describes an enum type.
|
||||
message EnumDescriptorProto {
|
||||
optional string name = 1;
|
||||
|
||||
repeated EnumValueDescriptorProto value = 2;
|
||||
|
||||
optional EnumOptions options = 3;
|
||||
}
|
||||
|
||||
// Describes a value within an enum.
|
||||
message EnumValueDescriptorProto {
|
||||
optional string name = 1;
|
||||
optional int32 number = 2;
|
||||
|
||||
optional EnumValueOptions options = 3;
|
||||
}
|
||||
|
||||
// Describes a service.
|
||||
message ServiceDescriptorProto {
|
||||
optional string name = 1;
|
||||
repeated MethodDescriptorProto method = 2;
|
||||
|
||||
optional ServiceOptions options = 3;
|
||||
}
|
||||
|
||||
// Describes a method of a service.
|
||||
message MethodDescriptorProto {
|
||||
optional string name = 1;
|
||||
|
||||
// Input and output type names. These are resolved in the same way as
|
||||
// FieldDescriptorProto.type_name, but must refer to a message type.
|
||||
optional string input_type = 2;
|
||||
optional string output_type = 3;
|
||||
|
||||
optional MethodOptions options = 4;
|
||||
|
||||
// Identifies if client streams multiple client messages
|
||||
optional bool client_streaming = 5 [default=false];
|
||||
// Identifies if server streams multiple server messages
|
||||
optional bool server_streaming = 6 [default=false];
|
||||
}
|
||||
|
||||
|
||||
// ===================================================================
|
||||
// Options
|
||||
|
||||
// Each of the definitions above may have "options" attached. These are
|
||||
// just annotations which may cause code to be generated slightly differently
|
||||
// or may contain hints for code that manipulates protocol messages.
|
||||
//
|
||||
// Clients may define custom options as extensions of the *Options messages.
|
||||
// These extensions may not yet be known at parsing time, so the parser cannot
|
||||
// store the values in them. Instead it stores them in a field in the *Options
|
||||
// message called uninterpreted_option. This field must have the same name
|
||||
// across all *Options messages. We then use this field to populate the
|
||||
// extensions when we build a descriptor, at which point all protos have been
|
||||
// parsed and so all extensions are known.
|
||||
//
|
||||
// Extension numbers for custom options may be chosen as follows:
|
||||
// * For options which will only be used within a single application or
|
||||
// organization, or for experimental options, use field numbers 50000
|
||||
// through 99999. It is up to you to ensure that you do not use the
|
||||
// same number for multiple options.
|
||||
// * For options which will be published and used publicly by multiple
|
||||
// independent entities, e-mail protobuf-global-extension-registry@google.com
|
||||
// to reserve extension numbers. Simply provide your project name (e.g.
|
||||
// Objective-C plugin) and your project website (if available) -- there's no
|
||||
// need to explain how you intend to use them. Usually you only need one
|
||||
// extension number. You can declare multiple options with only one extension
|
||||
// number by putting them in a sub-message. See the Custom Options section of
|
||||
// the docs for examples:
|
||||
// https://developers.google.com/protocol-buffers/docs/proto#options
|
||||
// If this turns out to be popular, a web service will be set up
|
||||
// to automatically assign option numbers.
|
||||
|
||||
|
||||
message FileOptions {
|
||||
|
||||
// Sets the Java package where classes generated from this .proto will be
|
||||
// placed. By default, the proto package is used, but this is often
|
||||
// inappropriate because proto packages do not normally start with backwards
|
||||
// domain names.
|
||||
optional string java_package = 1;
|
||||
|
||||
|
||||
// If set, all the classes from the .proto file are wrapped in a single
|
||||
// outer class with the given name. This applies to both Proto1
|
||||
// (equivalent to the old "--one_java_file" option) and Proto2 (where
|
||||
// a .proto always translates to a single class, but you may want to
|
||||
// explicitly choose the class name).
|
||||
optional string java_outer_classname = 8;
|
||||
|
||||
// If set true, then the Java code generator will generate a separate .java
|
||||
// file for each top-level message, enum, and service defined in the .proto
|
||||
// file. Thus, these types will *not* be nested inside the outer class
|
||||
// named by java_outer_classname. However, the outer class will still be
|
||||
// generated to contain the file's getDescriptor() method as well as any
|
||||
// top-level extensions defined in the file.
|
||||
optional bool java_multiple_files = 10 [default=false];
|
||||
|
||||
// If set true, then the Java code generator will generate equals() and
|
||||
// hashCode() methods for all messages defined in the .proto file.
|
||||
// This increases generated code size, potentially substantially for large
|
||||
// protos, which may harm a memory-constrained application.
|
||||
// - In the full runtime this is a speed optimization, as the
|
||||
// AbstractMessage base class includes reflection-based implementations of
|
||||
// these methods.
|
||||
// - In the lite runtime, setting this option changes the semantics of
|
||||
// equals() and hashCode() to more closely match those of the full runtime;
|
||||
// the generated methods compute their results based on field values rather
|
||||
// than object identity. (Implementations should not assume that hashcodes
|
||||
// will be consistent across runtimes or versions of the protocol compiler.)
|
||||
optional bool java_generate_equals_and_hash = 20 [default=false];
|
||||
|
||||
// If set true, then the Java2 code generator will generate code that
|
||||
// throws an exception whenever an attempt is made to assign a non-UTF-8
|
||||
// byte sequence to a string field.
|
||||
// Message reflection will do the same.
|
||||
// However, an extension field still accepts non-UTF-8 byte sequences.
|
||||
// This option has no effect when used with the lite runtime.
|
||||
optional bool java_string_check_utf8 = 27 [default=false];
|
||||
|
||||
|
||||
// Generated classes can be optimized for speed or code size.
|
||||
enum OptimizeMode {
|
||||
SPEED = 1; // Generate complete code for parsing, serialization,
|
||||
// etc.
|
||||
CODE_SIZE = 2; // Use ReflectionOps to implement these methods.
|
||||
LITE_RUNTIME = 3; // Generate code using MessageLite and the lite runtime.
|
||||
}
|
||||
optional OptimizeMode optimize_for = 9 [default=SPEED];
|
||||
|
||||
// Sets the Go package where structs generated from this .proto will be
|
||||
// placed. If omitted, the Go package will be derived from the following:
|
||||
// - The basename of the package import path, if provided.
|
||||
// - Otherwise, the package statement in the .proto file, if present.
|
||||
// - Otherwise, the basename of the .proto file, without extension.
|
||||
optional string go_package = 11;
|
||||
|
||||
|
||||
|
||||
// Should generic services be generated in each language? "Generic" services
|
||||
// are not specific to any particular RPC system. They are generated by the
|
||||
// main code generators in each language (without additional plugins).
|
||||
// Generic services were the only kind of service generation supported by
|
||||
// early versions of google.protobuf.
|
||||
//
|
||||
// Generic services are now considered deprecated in favor of using plugins
|
||||
// that generate code specific to your particular RPC system. Therefore,
|
||||
// these default to false. Old code which depends on generic services should
|
||||
// explicitly set them to true.
|
||||
optional bool cc_generic_services = 16 [default=false];
|
||||
optional bool java_generic_services = 17 [default=false];
|
||||
optional bool py_generic_services = 18 [default=false];
|
||||
|
||||
// Is this file deprecated?
|
||||
// Depending on the target platform, this can emit Deprecated annotations
|
||||
// for everything in the file, or it will be completely ignored; in the very
|
||||
// least, this is a formalization for deprecating files.
|
||||
optional bool deprecated = 23 [default=false];
|
||||
|
||||
// Enables the use of arenas for the proto messages in this file. This applies
|
||||
// only to generated classes for C++.
|
||||
optional bool cc_enable_arenas = 31 [default=false];
|
||||
|
||||
|
||||
// Sets the objective c class prefix which is prepended to all objective c
|
||||
// generated classes from this .proto. There is no default.
|
||||
optional string objc_class_prefix = 36;
|
||||
|
||||
// Namespace for generated classes; defaults to the package.
|
||||
optional string csharp_namespace = 37;
|
||||
|
||||
// The parser stores options it doesn't recognize here. See above.
|
||||
repeated UninterpretedOption uninterpreted_option = 999;
|
||||
|
||||
// Clients can define custom options in extensions of this message. See above.
|
||||
extensions 1000 to max;
|
||||
|
||||
//reserved 38;
|
||||
}
|
||||
|
||||
message MessageOptions {
|
||||
// Set true to use the old proto1 MessageSet wire format for extensions.
|
||||
// This is provided for backwards-compatibility with the MessageSet wire
|
||||
// format. You should not use this for any other reason: It's less
|
||||
// efficient, has fewer features, and is more complicated.
|
||||
//
|
||||
// The message must be defined exactly as follows:
|
||||
// message Foo {
|
||||
// option message_set_wire_format = true;
|
||||
// extensions 4 to max;
|
||||
// }
|
||||
// Note that the message cannot have any defined fields; MessageSets only
|
||||
// have extensions.
|
||||
//
|
||||
// All extensions of your type must be singular messages; e.g. they cannot
|
||||
// be int32s, enums, or repeated messages.
|
||||
//
|
||||
// Because this is an option, the above two restrictions are not enforced by
|
||||
// the protocol compiler.
|
||||
optional bool message_set_wire_format = 1 [default=false];
|
||||
|
||||
// Disables the generation of the standard "descriptor()" accessor, which can
|
||||
// conflict with a field of the same name. This is meant to make migration
|
||||
// from proto1 easier; new code should avoid fields named "descriptor".
|
||||
optional bool no_standard_descriptor_accessor = 2 [default=false];
|
||||
|
||||
// Is this message deprecated?
|
||||
// Depending on the target platform, this can emit Deprecated annotations
|
||||
// for the message, or it will be completely ignored; in the very least,
|
||||
// this is a formalization for deprecating messages.
|
||||
optional bool deprecated = 3 [default=false];
|
||||
|
||||
// Whether the message is an automatically generated map entry type for the
|
||||
// maps field.
|
||||
//
|
||||
// For maps fields:
|
||||
// map<KeyType, ValueType> map_field = 1;
|
||||
// The parsed descriptor looks like:
|
||||
// message MapFieldEntry {
|
||||
// option map_entry = true;
|
||||
// optional KeyType key = 1;
|
||||
// optional ValueType value = 2;
|
||||
// }
|
||||
// repeated MapFieldEntry map_field = 1;
|
||||
//
|
||||
// Implementations may choose not to generate the map_entry=true message, but
|
||||
// use a native map in the target language to hold the keys and values.
|
||||
// The reflection APIs in such implementations still need to work as
|
||||
// if the field is a repeated message field.
|
||||
//
|
||||
// NOTE: Do not set the option in .proto files. Always use the maps syntax
|
||||
// instead. The option should only be implicitly set by the proto compiler
|
||||
// parser.
|
||||
optional bool map_entry = 7;
|
||||
|
||||
// The parser stores options it doesn't recognize here. See above.
|
||||
repeated UninterpretedOption uninterpreted_option = 999;
|
||||
|
||||
// Clients can define custom options in extensions of this message. See above.
|
||||
extensions 1000 to max;
|
||||
}
|
||||
|
||||
message FieldOptions {
|
||||
// The ctype option instructs the C++ code generator to use a different
|
||||
// representation of the field than it normally would. See the specific
|
||||
// options below. This option is not yet implemented in the open source
|
||||
// release -- sorry, we'll try to include it in a future version!
|
||||
optional CType ctype = 1 [default = STRING];
|
||||
enum CType {
|
||||
// Default mode.
|
||||
STRING = 0;
|
||||
|
||||
CORD = 1;
|
||||
|
||||
STRING_PIECE = 2;
|
||||
}
|
||||
// The packed option can be enabled for repeated primitive fields to enable
|
||||
// a more efficient representation on the wire. Rather than repeatedly
|
||||
// writing the tag and type for each element, the entire array is encoded as
|
||||
// a single length-delimited blob. In proto3, only explicitly setting it to
|
||||
// false will avoid using packed encoding.
|
||||
optional bool packed = 2;
|
||||
|
||||
|
||||
// The jstype option determines the JavaScript type used for values of the
|
||||
// field. The option is permitted only for 64 bit integral and fixed types
|
||||
// (int64, uint64, sint64, fixed64, sfixed64). By default these types are
|
||||
// represented as JavaScript strings. This avoids loss of precision that can
|
||||
// happen when a large value is converted to a floating point JavaScript
|
||||
// number. Specifying JS_NUMBER for the jstype causes the generated
|
||||
// JavaScript code to use the JavaScript "number" type instead of strings.
|
||||
// This option is an enum to permit additional types to be added,
|
||||
// e.g. goog.math.Integer.
|
||||
optional JSType jstype = 6 [default = JS_NORMAL];
|
||||
enum JSType {
|
||||
// Use the default type.
|
||||
JS_NORMAL = 0;
|
||||
|
||||
// Use JavaScript strings.
|
||||
JS_STRING = 1;
|
||||
|
||||
// Use JavaScript numbers.
|
||||
JS_NUMBER = 2;
|
||||
}
|
||||
|
||||
// Should this field be parsed lazily? Lazy applies only to message-type
|
||||
// fields. It means that when the outer message is initially parsed, the
|
||||
// inner message's contents will not be parsed but instead stored in encoded
|
||||
// form. The inner message will actually be parsed when it is first accessed.
|
||||
//
|
||||
// This is only a hint. Implementations are free to choose whether to use
|
||||
// eager or lazy parsing regardless of the value of this option. However,
|
||||
// setting this option true suggests that the protocol author believes that
|
||||
// using lazy parsing on this field is worth the additional bookkeeping
|
||||
// overhead typically needed to implement it.
|
||||
//
|
||||
// This option does not affect the public interface of any generated code;
|
||||
// all method signatures remain the same. Furthermore, thread-safety of the
|
||||
// interface is not affected by this option; const methods remain safe to
|
||||
// call from multiple threads concurrently, while non-const methods continue
|
||||
// to require exclusive access.
|
||||
//
|
||||
//
|
||||
// Note that implementations may choose not to check required fields within
|
||||
// a lazy sub-message. That is, calling IsInitialized() on the outer message
|
||||
// may return true even if the inner message has missing required fields.
|
||||
// This is necessary because otherwise the inner message would have to be
|
||||
// parsed in order to perform the check, defeating the purpose of lazy
|
||||
// parsing. An implementation which chooses not to check required fields
|
||||
// must be consistent about it. That is, for any particular sub-message, the
|
||||
// implementation must either *always* check its required fields, or *never*
|
||||
// check its required fields, regardless of whether or not the message has
|
||||
// been parsed.
|
||||
optional bool lazy = 5 [default=false];
|
||||
|
||||
// Is this field deprecated?
|
||||
// Depending on the target platform, this can emit Deprecated annotations
|
||||
// for accessors, or it will be completely ignored; in the very least, this
|
||||
// is a formalization for deprecating fields.
|
||||
optional bool deprecated = 3 [default=false];
|
||||
|
||||
// For Google-internal migration only. Do not use.
|
||||
optional bool weak = 10 [default=false];
|
||||
|
||||
|
||||
// The parser stores options it doesn't recognize here. See above.
|
||||
repeated UninterpretedOption uninterpreted_option = 999;
|
||||
|
||||
// Clients can define custom options in extensions of this message. See above.
|
||||
extensions 1000 to max;
|
||||
}
|
||||
|
||||
message OneofOptions {
|
||||
// The parser stores options it doesn't recognize here. See above.
|
||||
repeated UninterpretedOption uninterpreted_option = 999;
|
||||
|
||||
// Clients can define custom options in extensions of this message. See above.
|
||||
extensions 1000 to max;
|
||||
}
|
||||
|
||||
message EnumOptions {
|
||||
|
||||
// Set this option to true to allow mapping different tag names to the same
|
||||
// value.
|
||||
optional bool allow_alias = 2;
|
||||
|
||||
// Is this enum deprecated?
|
||||
// Depending on the target platform, this can emit Deprecated annotations
|
||||
// for the enum, or it will be completely ignored; in the very least, this
|
||||
// is a formalization for deprecating enums.
|
||||
optional bool deprecated = 3 [default=false];
|
||||
|
||||
// The parser stores options it doesn't recognize here. See above.
|
||||
repeated UninterpretedOption uninterpreted_option = 999;
|
||||
|
||||
// Clients can define custom options in extensions of this message. See above.
|
||||
extensions 1000 to max;
|
||||
}
|
||||
|
||||
message EnumValueOptions {
|
||||
// Is this enum value deprecated?
|
||||
// Depending on the target platform, this can emit Deprecated annotations
|
||||
// for the enum value, or it will be completely ignored; in the very least,
|
||||
// this is a formalization for deprecating enum values.
|
||||
optional bool deprecated = 1 [default=false];
|
||||
|
||||
// The parser stores options it doesn't recognize here. See above.
|
||||
repeated UninterpretedOption uninterpreted_option = 999;
|
||||
|
||||
// Clients can define custom options in extensions of this message. See above.
|
||||
extensions 1000 to max;
|
||||
}
|
||||
|
||||
message ServiceOptions {
|
||||
|
||||
// Note: Field numbers 1 through 32 are reserved for Google's internal RPC
|
||||
// framework. We apologize for hoarding these numbers to ourselves, but
|
||||
// we were already using them long before we decided to release Protocol
|
||||
// Buffers.
|
||||
|
||||
// Is this service deprecated?
|
||||
// Depending on the target platform, this can emit Deprecated annotations
|
||||
// for the service, or it will be completely ignored; in the very least,
|
||||
// this is a formalization for deprecating services.
|
||||
optional bool deprecated = 33 [default=false];
|
||||
|
||||
// The parser stores options it doesn't recognize here. See above.
|
||||
repeated UninterpretedOption uninterpreted_option = 999;
|
||||
|
||||
// Clients can define custom options in extensions of this message. See above.
|
||||
extensions 1000 to max;
|
||||
}
|
||||
|
||||
message MethodOptions {
|
||||
|
||||
// Note: Field numbers 1 through 32 are reserved for Google's internal RPC
|
||||
// framework. We apologize for hoarding these numbers to ourselves, but
|
||||
// we were already using them long before we decided to release Protocol
|
||||
// Buffers.
|
||||
|
||||
// Is this method deprecated?
|
||||
// Depending on the target platform, this can emit Deprecated annotations
|
||||
// for the method, or it will be completely ignored; in the very least,
|
||||
// this is a formalization for deprecating methods.
|
||||
optional bool deprecated = 33 [default=false];
|
||||
|
||||
// The parser stores options it doesn't recognize here. See above.
|
||||
repeated UninterpretedOption uninterpreted_option = 999;
|
||||
|
||||
// Clients can define custom options in extensions of this message. See above.
|
||||
extensions 1000 to max;
|
||||
}
|
||||
|
||||
|
||||
// A message representing a option the parser does not recognize. This only
|
||||
// appears in options protos created by the compiler::Parser class.
|
||||
// DescriptorPool resolves these when building Descriptor objects. Therefore,
|
||||
// options protos in descriptor objects (e.g. returned by Descriptor::options(),
|
||||
// or produced by Descriptor::CopyTo()) will never have UninterpretedOptions
|
||||
// in them.
|
||||
message UninterpretedOption {
|
||||
// The name of the uninterpreted option. Each string represents a segment in
|
||||
// a dot-separated name. is_extension is true iff a segment represents an
|
||||
// extension (denoted with parentheses in options specs in .proto files).
|
||||
// E.g.,{ ["foo", false], ["bar.baz", true], ["qux", false] } represents
|
||||
// "foo.(bar.baz).qux".
|
||||
message NamePart {
|
||||
required string name_part = 1;
|
||||
required bool is_extension = 2;
|
||||
}
|
||||
repeated NamePart name = 2;
|
||||
|
||||
// The value of the uninterpreted option, in whatever type the tokenizer
|
||||
// identified it as during parsing. Exactly one of these should be set.
|
||||
optional string identifier_value = 3;
|
||||
optional uint64 positive_int_value = 4;
|
||||
optional int64 negative_int_value = 5;
|
||||
optional double double_value = 6;
|
||||
optional bytes string_value = 7;
|
||||
optional string aggregate_value = 8;
|
||||
}
|
||||
|
||||
// ===================================================================
|
||||
// Optional source code info
|
||||
|
||||
// Encapsulates information about the original source file from which a
|
||||
// FileDescriptorProto was generated.
|
||||
message SourceCodeInfo {
|
||||
// A Location identifies a piece of source code in a .proto file which
|
||||
// corresponds to a particular definition. This information is intended
|
||||
// to be useful to IDEs, code indexers, documentation generators, and similar
|
||||
// tools.
|
||||
//
|
||||
// For example, say we have a file like:
|
||||
// message Foo {
|
||||
// optional string foo = 1;
|
||||
// }
|
||||
// Let's look at just the field definition:
|
||||
// optional string foo = 1;
|
||||
// ^ ^^ ^^ ^ ^^^
|
||||
// a bc de f ghi
|
||||
// We have the following locations:
|
||||
// span path represents
|
||||
// [a,i) [ 4, 0, 2, 0 ] The whole field definition.
|
||||
// [a,b) [ 4, 0, 2, 0, 4 ] The label (optional).
|
||||
// [c,d) [ 4, 0, 2, 0, 5 ] The type (string).
|
||||
// [e,f) [ 4, 0, 2, 0, 1 ] The name (foo).
|
||||
// [g,h) [ 4, 0, 2, 0, 3 ] The number (1).
|
||||
//
|
||||
// Notes:
|
||||
// - A location may refer to a repeated field itself (i.e. not to any
|
||||
// particular index within it). This is used whenever a set of elements are
|
||||
// logically enclosed in a single code segment. For example, an entire
|
||||
// extend block (possibly containing multiple extension definitions) will
|
||||
// have an outer location whose path refers to the "extensions" repeated
|
||||
// field without an index.
|
||||
// - Multiple locations may have the same path. This happens when a single
|
||||
// logical declaration is spread out across multiple places. The most
|
||||
// obvious example is the "extend" block again -- there may be multiple
|
||||
// extend blocks in the same scope, each of which will have the same path.
|
||||
// - A location's span is not always a subset of its parent's span. For
|
||||
// example, the "extendee" of an extension declaration appears at the
|
||||
// beginning of the "extend" block and is shared by all extensions within
|
||||
// the block.
|
||||
// - Just because a location's span is a subset of some other location's span
|
||||
// does not mean that it is a descendent. For example, a "group" defines
|
||||
// both a type and a field in a single declaration. Thus, the locations
|
||||
// corresponding to the type and field and their components will overlap.
|
||||
// - Code which tries to interpret locations should probably be designed to
|
||||
// ignore those that it doesn't understand, as more types of locations could
|
||||
// be recorded in the future.
|
||||
repeated Location location = 1;
|
||||
message Location {
|
||||
// Identifies which part of the FileDescriptorProto was defined at this
|
||||
// location.
|
||||
//
|
||||
// Each element is a field number or an index. They form a path from
|
||||
// the root FileDescriptorProto to the place where the definition appears. For
|
||||
// example, this path:
|
||||
// [ 4, 3, 2, 7, 1 ]
|
||||
// refers to:
|
||||
// file.message_type(3) // 4, 3
|
||||
// .field(7) // 2, 7
|
||||
// .name() // 1
|
||||
// This is because FileDescriptorProto.message_type has field number 4:
|
||||
// repeated DescriptorProto message_type = 4;
|
||||
// and DescriptorProto.field has field number 2:
|
||||
// repeated FieldDescriptorProto field = 2;
|
||||
// and FieldDescriptorProto.name has field number 1:
|
||||
// optional string name = 1;
|
||||
//
|
||||
// Thus, the above path gives the location of a field name. If we removed
|
||||
// the last element:
|
||||
// [ 4, 3, 2, 7 ]
|
||||
// this path refers to the whole field declaration (from the beginning
|
||||
// of the label to the terminating semicolon).
|
||||
repeated int32 path = 1 [packed=true];
|
||||
|
||||
// Always has exactly three or four elements: start line, start column,
|
||||
// end line (optional, otherwise assumed same as start line), end column.
|
||||
// These are packed into a single field for efficiency. Note that line
|
||||
// and column numbers are zero-based -- typically you will want to add
|
||||
// 1 to each before displaying to a user.
|
||||
repeated int32 span = 2 [packed=true];
|
||||
|
||||
// If this SourceCodeInfo represents a complete declaration, these are any
|
||||
// comments appearing before and after the declaration which appear to be
|
||||
// attached to the declaration.
|
||||
//
|
||||
// A series of line comments appearing on consecutive lines, with no other
|
||||
// tokens appearing on those lines, will be treated as a single comment.
|
||||
//
|
||||
// leading_detached_comments will keep paragraphs of comments that appear
|
||||
// before (but not connected to) the current element. Each paragraph,
|
||||
// separated by empty lines, will be one comment element in the repeated
|
||||
// field.
|
||||
//
|
||||
// Only the comment content is provided; comment markers (e.g. //) are
|
||||
// stripped out. For block comments, leading whitespace and an asterisk
|
||||
// will be stripped from the beginning of each line other than the first.
|
||||
// Newlines are included in the output.
|
||||
//
|
||||
// Examples:
|
||||
//
|
||||
// optional int32 foo = 1; // Comment attached to foo.
|
||||
// // Comment attached to bar.
|
||||
// optional int32 bar = 2;
|
||||
//
|
||||
// optional string baz = 3;
|
||||
// // Comment attached to baz.
|
||||
// // Another line attached to baz.
|
||||
//
|
||||
// // Comment attached to qux.
|
||||
// //
|
||||
// // Another line attached to qux.
|
||||
// optional double qux = 4;
|
||||
//
|
||||
// // Detached comment for corge. This is not leading or trailing comments
|
||||
// // to qux or corge because there are blank lines separating it from
|
||||
// // both.
|
||||
//
|
||||
// // Detached comment for corge paragraph 2.
|
||||
//
|
||||
// optional string corge = 5;
|
||||
// /* Block comment attached
|
||||
// * to corge. Leading asterisks
|
||||
// * will be removed. */
|
||||
// /* Block comment attached to
|
||||
// * grault. */
|
||||
// optional int32 grault = 6;
|
||||
//
|
||||
// // ignored detached comments.
|
||||
optional string leading_comments = 3;
|
||||
optional string trailing_comments = 4;
|
||||
repeated string leading_detached_comments = 6;
|
||||
}
|
||||
}
|
||||
|
||||
// Describes the relationship between generated code and its original source
|
||||
// file. A GeneratedCodeInfo message is associated with only one generated
|
||||
// source file, but may contain references to different source .proto files.
|
||||
message GeneratedCodeInfo {
|
||||
// An Annotation connects some span of text in generated code to an element
|
||||
// of its generating .proto file.
|
||||
repeated Annotation annotation = 1;
|
||||
message Annotation {
|
||||
// Identifies the element in the original source .proto file. This field
|
||||
// is formatted the same as SourceCodeInfo.Location.path.
|
||||
repeated int32 path = 1 [packed=true];
|
||||
|
||||
// Identifies the filesystem path to the original source .proto.
|
||||
optional string source_file = 2;
|
||||
|
||||
// Identifies the starting offset in bytes in the generated code
|
||||
// that relates to the identified object.
|
||||
optional int32 begin = 3;
|
||||
|
||||
// Identifies the ending offset in bytes in the generated code that
|
||||
// relates to the identified offset. The end offset should be one past
|
||||
// the last relevant byte (so the length of the text = end - begin).
|
||||
optional int32 end = 4;
|
||||
}
|
||||
}
98 vendor/github.com/gogo/protobuf/protobuf/google/protobuf/duration.proto generated vendored Normal file
@@ -0,0 +1,98 @@
// Protocol Buffers - Google's data interchange format
|
||||
// Copyright 2008 Google Inc. All rights reserved.
|
||||
// https://developers.google.com/protocol-buffers/
|
||||
//
|
||||
// Redistribution and use in source and binary forms, with or without
|
||||
// modification, are permitted provided that the following conditions are
|
||||
// met:
|
||||
//
|
||||
// * Redistributions of source code must retain the above copyright
|
||||
// notice, this list of conditions and the following disclaimer.
|
||||
// * Redistributions in binary form must reproduce the above
|
||||
// copyright notice, this list of conditions and the following disclaimer
|
||||
// in the documentation and/or other materials provided with the
|
||||
// distribution.
|
||||
// * Neither the name of Google Inc. nor the names of its
|
||||
// contributors may be used to endorse or promote products derived from
|
||||
// this software without specific prior written permission.
|
||||
//
|
||||
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
|
||||
syntax = "proto3";
|
||||
|
||||
package google.protobuf;
|
||||
|
||||
option csharp_namespace = "Google.Protobuf.WellKnownTypes";
|
||||
option go_package = "types";
|
||||
option java_package = "com.google.protobuf";
|
||||
option java_outer_classname = "DurationProto";
|
||||
option java_multiple_files = true;
|
||||
option java_generate_equals_and_hash = true;
|
||||
option objc_class_prefix = "GPB";
|
||||
|
||||
// A Duration represents a signed, fixed-length span of time represented
|
||||
// as a count of seconds and fractions of seconds at nanosecond
|
||||
// resolution. It is independent of any calendar and concepts like "day"
|
||||
// or "month". It is related to Timestamp in that the difference between
|
||||
// two Timestamp values is a Duration and it can be added or subtracted
|
||||
// from a Timestamp. Range is approximately +-10,000 years.
|
||||
//
|
||||
// Example 1: Compute Duration from two Timestamps in pseudo code.
|
||||
//
|
||||
// Timestamp start = ...;
|
||||
// Timestamp end = ...;
|
||||
// Duration duration = ...;
|
||||
//
|
||||
// duration.seconds = end.seconds - start.seconds;
|
||||
// duration.nanos = end.nanos - start.nanos;
|
||||
//
|
||||
// if (duration.seconds < 0 && duration.nanos > 0) {
|
||||
// duration.seconds += 1;
|
||||
// duration.nanos -= 1000000000;
|
||||
// } else if (duration.seconds > 0 && duration.nanos < 0) {
|
||||
// duration.seconds -= 1;
|
||||
// duration.nanos += 1000000000;
|
||||
// }
|
||||
//
|
||||
// Example 2: Compute Timestamp from Timestamp + Duration in pseudo code.
|
||||
//
|
||||
// Timestamp start = ...;
|
||||
// Duration duration = ...;
|
||||
// Timestamp end = ...;
|
||||
//
|
||||
// end.seconds = start.seconds + duration.seconds;
|
||||
// end.nanos = start.nanos + duration.nanos;
|
||||
//
|
||||
// if (end.nanos < 0) {
|
||||
// end.seconds -= 1;
|
||||
// end.nanos += 1000000000;
|
||||
// } else if (end.nanos >= 1000000000) {
|
||||
// end.seconds += 1;
|
||||
// end.nanos -= 1000000000;
|
||||
// }
|
||||
//
|
||||
//
|
||||
message Duration {
|
||||
|
||||
// Signed seconds of the span of time. Must be from -315,576,000,000
|
||||
// to +315,576,000,000 inclusive.
|
||||
int64 seconds = 1;
|
||||
|
||||
// Signed fractions of a second at nanosecond resolution of the span
|
||||
// of time. Durations less than one second are represented with a 0
|
||||
// `seconds` field and a positive or negative `nanos` field. For durations
|
||||
// of one second or more, a non-zero value for the `nanos` field must be
|
||||
// of the same sign as the `seconds` field. Must be from -999,999,999
|
||||
// to +999,999,999 inclusive.
|
||||
int32 nanos = 2;
|
||||
}
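The normalization rules in the Duration comment above can be sketched in Go. This is a minimal stand-alone illustration, not part of the vendored file; the Timestamp and Duration structs below are local stand-ins rather than the generated gogo/protobuf types.

package main

import "fmt"

// Local stand-ins for google.protobuf.Timestamp and google.protobuf.Duration,
// defined here only so the example is self-contained.
type Timestamp struct {
	Seconds int64
	Nanos   int32
}

type Duration struct {
	Seconds int64
	Nanos   int32
}

// durationBetween mirrors "Example 1" above: end - start, normalized so that
// seconds and nanos carry the same sign.
func durationBetween(start, end Timestamp) Duration {
	d := Duration{
		Seconds: end.Seconds - start.Seconds,
		Nanos:   end.Nanos - start.Nanos,
	}
	if d.Seconds < 0 && d.Nanos > 0 {
		d.Seconds++
		d.Nanos -= 1000000000
	} else if d.Seconds > 0 && d.Nanos < 0 {
		d.Seconds--
		d.Nanos += 1000000000
	}
	return d
}

func main() {
	start := Timestamp{Seconds: 10, Nanos: 900000000}
	end := Timestamp{Seconds: 12, Nanos: 100000000}
	fmt.Printf("%+v\n", durationBetween(start, end)) // {Seconds:1 Nanos:200000000}
}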
53 vendor/github.com/gogo/protobuf/protobuf/google/protobuf/empty.proto generated vendored Normal file
@@ -0,0 +1,53 @@
// Protocol Buffers - Google's data interchange format
|
||||
// Copyright 2008 Google Inc. All rights reserved.
|
||||
// https://developers.google.com/protocol-buffers/
|
||||
//
|
||||
// Redistribution and use in source and binary forms, with or without
|
||||
// modification, are permitted provided that the following conditions are
|
||||
// met:
|
||||
//
|
||||
// * Redistributions of source code must retain the above copyright
|
||||
// notice, this list of conditions and the following disclaimer.
|
||||
// * Redistributions in binary form must reproduce the above
|
||||
// copyright notice, this list of conditions and the following disclaimer
|
||||
// in the documentation and/or other materials provided with the
|
||||
// distribution.
|
||||
// * Neither the name of Google Inc. nor the names of its
|
||||
// contributors may be used to endorse or promote products derived from
|
||||
// this software without specific prior written permission.
|
||||
//
|
||||
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
|
||||
syntax = "proto3";
|
||||
|
||||
package google.protobuf;
|
||||
|
||||
option csharp_namespace = "Google.Protobuf.WellKnownTypes";
|
||||
option go_package = "types";
|
||||
option java_package = "com.google.protobuf";
|
||||
option java_outer_classname = "EmptyProto";
|
||||
option java_multiple_files = true;
|
||||
option java_generate_equals_and_hash = true;
|
||||
option objc_class_prefix = "GPB";
|
||||
option cc_enable_arenas = true;
|
||||
|
||||
// A generic empty message that you can re-use to avoid defining duplicated
|
||||
// empty messages in your APIs. A typical example is to use it as the request
|
||||
// or the response type of an API method. For instance:
|
||||
//
|
||||
// service Foo {
|
||||
// rpc Bar(google.protobuf.Empty) returns (google.protobuf.Empty);
|
||||
// }
|
||||
//
|
||||
// The JSON representation for `Empty` is empty JSON object `{}`.
|
||||
message Empty {}
246 vendor/github.com/gogo/protobuf/protobuf/google/protobuf/field_mask.proto generated vendored Normal file
@@ -0,0 +1,246 @@
// Protocol Buffers - Google's data interchange format
|
||||
// Copyright 2008 Google Inc. All rights reserved.
|
||||
// https://developers.google.com/protocol-buffers/
|
||||
//
|
||||
// Redistribution and use in source and binary forms, with or without
|
||||
// modification, are permitted provided that the following conditions are
|
||||
// met:
|
||||
//
|
||||
// * Redistributions of source code must retain the above copyright
|
||||
// notice, this list of conditions and the following disclaimer.
|
||||
// * Redistributions in binary form must reproduce the above
|
||||
// copyright notice, this list of conditions and the following disclaimer
|
||||
// in the documentation and/or other materials provided with the
|
||||
// distribution.
|
||||
// * Neither the name of Google Inc. nor the names of its
|
||||
// contributors may be used to endorse or promote products derived from
|
||||
// this software without specific prior written permission.
|
||||
//
|
||||
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
|
||||
syntax = "proto3";
|
||||
|
||||
package google.protobuf;
|
||||
|
||||
option csharp_namespace = "Google.Protobuf.WellKnownTypes"; option go_package = "types";
|
||||
option java_package = "com.google.protobuf";
|
||||
option java_outer_classname = "FieldMaskProto";
|
||||
option java_multiple_files = true;
|
||||
option objc_class_prefix = "GPB";
|
||||
option java_generate_equals_and_hash = true;
|
||||
|
||||
// `FieldMask` represents a set of symbolic field paths, for example:
|
||||
//
|
||||
// paths: "f.a"
|
||||
// paths: "f.b.d"
|
||||
//
|
||||
// Here `f` represents a field in some root message, `a` and `b`
|
||||
// fields in the message found in `f`, and `d` a field found in the
|
||||
// message in `f.b`.
|
||||
//
|
||||
// Field masks are used to specify a subset of fields that should be
|
||||
// returned by a get operation or modified by an update operation.
|
||||
// Field masks also have a custom JSON encoding (see below).
|
||||
//
|
||||
// # Field Masks in Projections
|
||||
//
|
||||
// When used in the context of a projection, a response message or
|
||||
// sub-message is filtered by the API to only contain those fields as
|
||||
// specified in the mask. For example, if the mask in the previous
|
||||
// example is applied to a response message as follows:
|
||||
//
|
||||
// f {
|
||||
// a : 22
|
||||
// b {
|
||||
// d : 1
|
||||
// x : 2
|
||||
// }
|
||||
// y : 13
|
||||
// }
|
||||
// z: 8
|
||||
//
|
||||
// The result will not contain specific values for fields x,y and z
|
||||
// (their value will be set to the default, and omitted in proto text
|
||||
// output):
|
||||
//
|
||||
//
|
||||
// f {
|
||||
// a : 22
|
||||
// b {
|
||||
// d : 1
|
||||
// }
|
||||
// }
|
||||
//
|
||||
// A repeated field is not allowed except at the last position of a
|
||||
// field mask.
|
||||
//
|
||||
// If a FieldMask object is not present in a get operation, the
|
||||
// operation applies to all fields (as if a FieldMask of all fields
|
||||
// had been specified).
|
||||
//
|
||||
// Note that a field mask does not necessarily apply to the
|
||||
// top-level response message. In case of a REST get operation, the
|
||||
// field mask applies directly to the response, but in case of a REST
|
||||
// list operation, the mask instead applies to each individual message
|
||||
// in the returned resource list. In case of a REST custom method,
|
||||
// other definitions may be used. Where the mask applies will be
|
||||
// clearly documented together with its declaration in the API. In
|
||||
// any case, the effect on the returned resource/resources is required
|
||||
// behavior for APIs.
|
||||
//
|
||||
// # Field Masks in Update Operations
|
||||
//
|
||||
// A field mask in update operations specifies which fields of the
|
||||
// targeted resource are going to be updated. The API is required
|
||||
// to only change the values of the fields as specified in the mask
|
||||
// and leave the others untouched. If a resource is passed in to
|
||||
// describe the updated values, the API ignores the values of all
|
||||
// fields not covered by the mask.
|
||||
//
|
||||
// If a repeated field is specified for an update operation, the existing
|
||||
// repeated values in the target resource will be overwritten by the new values.
|
||||
// Note that a repeated field is only allowed in the last position of a field
|
||||
// mask.
|
||||
//
|
||||
// If a sub-message is specified in the last position of the field mask for an
|
||||
// update operation, then the existing sub-message in the target resource is
|
||||
// overwritten. Given the target message:
|
||||
//
|
||||
// f {
|
||||
// b {
|
||||
// d : 1
|
||||
// x : 2
|
||||
// }
|
||||
// c : 1
|
||||
// }
|
||||
//
|
||||
// And an update message:
|
||||
//
|
||||
// f {
|
||||
// b {
|
||||
// d : 10
|
||||
// }
|
||||
// }
|
||||
//
|
||||
// then if the field mask is:
|
||||
//
|
||||
// paths: "f.b"
|
||||
//
|
||||
// then the result will be:
|
||||
//
|
||||
// f {
|
||||
// b {
|
||||
// d : 10
|
||||
// }
|
||||
// c : 1
|
||||
// }
|
||||
//
|
||||
// However, if the update mask was:
|
||||
//
|
||||
// paths: "f.b.d"
|
||||
//
|
||||
// then the result would be:
|
||||
//
|
||||
// f {
|
||||
// b {
|
||||
// d : 10
|
||||
// x : 2
|
||||
// }
|
||||
// c : 1
|
||||
// }
|
||||
//
|
||||
// In order to reset a field's value to the default, the field must
|
||||
// be in the mask and set to the default value in the provided resource.
|
||||
// Hence, in order to reset all fields of a resource, provide a default
|
||||
// instance of the resource and set all fields in the mask, or do
|
||||
// not provide a mask as described below.
|
||||
//
|
||||
// If a field mask is not present on update, the operation applies to
|
||||
// all fields (as if a field mask of all fields has been specified).
|
||||
// Note that in the presence of schema evolution, this may mean that
|
||||
// fields the client does not know and has therefore not filled into
|
||||
// the request will be reset to their default. If this is unwanted
|
||||
// behavior, a specific service may require a client to always specify
|
||||
// a field mask, producing an error if not.
|
||||
//
|
||||
// As with get operations, the location of the resource which
|
||||
// describes the updated values in the request message depends on the
|
||||
// operation kind. In any case, the effect of the field mask is
|
||||
// required to be honored by the API.
|
||||
//
|
||||
// ## Considerations for HTTP REST
|
||||
//
|
||||
// The HTTP kind of an update operation which uses a field mask must
|
||||
// be set to PATCH instead of PUT in order to satisfy HTTP semantics
|
||||
// (PUT must only be used for full updates).
|
||||
//
|
||||
// # JSON Encoding of Field Masks
|
||||
//
|
||||
// In JSON, a field mask is encoded as a single string where paths are
|
||||
// separated by a comma. Field names in each path are converted
|
||||
// to/from lower-camel naming conventions.
|
||||
//
|
||||
// As an example, consider the following message declarations:
|
||||
//
|
||||
// message Profile {
|
||||
// User user = 1;
|
||||
// Photo photo = 2;
|
||||
// }
|
||||
// message User {
|
||||
// string display_name = 1;
|
||||
// string address = 2;
|
||||
// }
|
||||
//
|
||||
// In proto a field mask for `Profile` may look as such:
|
||||
//
|
||||
// mask {
|
||||
// paths: "user.display_name"
|
||||
// paths: "photo"
|
||||
// }
|
||||
//
|
||||
// In JSON, the same mask is represented as below:
|
||||
//
|
||||
// {
|
||||
// mask: "user.displayName,photo"
|
||||
// }
|
||||
//
|
||||
// # Field Masks and Oneof Fields
|
||||
//
|
||||
// Field masks treat fields in oneofs just as regular fields. Consider the
|
||||
// following message:
|
||||
//
|
||||
// message SampleMessage {
|
||||
// oneof test_oneof {
|
||||
// string name = 4;
|
||||
// SubMessage sub_message = 9;
|
||||
// }
|
||||
// }
|
||||
//
|
||||
// The field mask can be:
|
||||
//
|
||||
// mask {
|
||||
// paths: "name"
|
||||
// }
|
||||
//
|
||||
// Or:
|
||||
//
|
||||
// mask {
|
||||
// paths: "sub_message"
|
||||
// }
|
||||
//
|
||||
// Note that oneof type names ("test_oneof" in this case) cannot be used in
|
||||
// paths.
|
||||
message FieldMask {
|
||||
// The set of field mask paths.
|
||||
repeated string paths = 1;
|
||||
}
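The JSON encoding described above (paths joined by commas, field names converted to lowerCamelCase) can be sketched with a small stand-alone Go helper. maskToJSON and toLowerCamel are illustrative names for this sketch, not part of any vendored package.

package main

import (
	"fmt"
	"strings"
)

// toLowerCamel converts a snake_case field name to lowerCamelCase, as the
// "# JSON Encoding of Field Masks" section above requires.
func toLowerCamel(s string) string {
	parts := strings.Split(s, "_")
	for i := 1; i < len(parts); i++ {
		if parts[i] != "" {
			parts[i] = strings.ToUpper(parts[i][:1]) + parts[i][1:]
		}
	}
	return strings.Join(parts, "")
}

// maskToJSON joins mask paths into the single comma-separated string used by
// the JSON encoding of FieldMask.
func maskToJSON(paths []string) string {
	out := make([]string, len(paths))
	for i, p := range paths {
		segs := strings.Split(p, ".")
		for j, s := range segs {
			segs[j] = toLowerCamel(s)
		}
		out[i] = strings.Join(segs, ".")
	}
	return strings.Join(out, ",")
}

func main() {
	fmt.Println(maskToJSON([]string{"user.display_name", "photo"}))
	// user.displayName,photo
}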
96 vendor/github.com/gogo/protobuf/protobuf/google/protobuf/struct.proto generated vendored Normal file
@@ -0,0 +1,96 @@
// Protocol Buffers - Google's data interchange format
|
||||
// Copyright 2008 Google Inc. All rights reserved.
|
||||
// https://developers.google.com/protocol-buffers/
|
||||
//
|
||||
// Redistribution and use in source and binary forms, with or without
|
||||
// modification, are permitted provided that the following conditions are
|
||||
// met:
|
||||
//
|
||||
// * Redistributions of source code must retain the above copyright
|
||||
// notice, this list of conditions and the following disclaimer.
|
||||
// * Redistributions in binary form must reproduce the above
|
||||
// copyright notice, this list of conditions and the following disclaimer
|
||||
// in the documentation and/or other materials provided with the
|
||||
// distribution.
|
||||
// * Neither the name of Google Inc. nor the names of its
|
||||
// contributors may be used to endorse or promote products derived from
|
||||
// this software without specific prior written permission.
|
||||
//
|
||||
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
|
||||
syntax = "proto3";
|
||||
|
||||
package google.protobuf;
|
||||
|
||||
option csharp_namespace = "Google.Protobuf.WellKnownTypes";
|
||||
option go_package = "types";
|
||||
option java_package = "com.google.protobuf";
|
||||
option java_outer_classname = "StructProto";
|
||||
option java_multiple_files = true;
|
||||
option java_generate_equals_and_hash = true;
|
||||
option objc_class_prefix = "GPB";
|
||||
|
||||
|
||||
// `Struct` represents a structured data value, consisting of fields
|
||||
// which map to dynamically typed values. In some languages, `Struct`
|
||||
// might be supported by a native representation. For example, in
|
||||
// scripting languages like JS a struct is represented as an
|
||||
// object. The details of that representation are described together
|
||||
// with the proto support for the language.
|
||||
//
|
||||
// The JSON representation for `Struct` is JSON object.
|
||||
message Struct {
|
||||
// Unordered map of dynamically typed values.
|
||||
map<string, Value> fields = 1;
|
||||
}
|
||||
|
||||
// `Value` represents a dynamically typed value which can be either
|
||||
// null, a number, a string, a boolean, a recursive struct value, or a
|
||||
// list of values. A producer of value is expected to set one of these
|
||||
// variants, absence of any variant indicates an error.
|
||||
//
|
||||
// The JSON representation for `Value` is JSON value.
|
||||
message Value {
|
||||
// The kind of value.
|
||||
oneof kind {
|
||||
// Represents a null value.
|
||||
NullValue null_value = 1;
|
||||
// Represents a double value.
|
||||
double number_value = 2;
|
||||
// Represents a string value.
|
||||
string string_value = 3;
|
||||
// Represents a boolean value.
|
||||
bool bool_value = 4;
|
||||
// Represents a structured value.
|
||||
Struct struct_value = 5;
|
||||
// Represents a repeated `Value`.
|
||||
ListValue list_value = 6;
|
||||
}
|
||||
}
|
||||
|
||||
// `NullValue` is a singleton enumeration to represent the null value for the
|
||||
// `Value` type union.
|
||||
//
|
||||
// The JSON representation for `NullValue` is JSON `null`.
|
||||
enum NullValue {
|
||||
// Null value.
|
||||
NULL_VALUE = 0;
|
||||
}
|
||||
|
||||
// `ListValue` is a wrapper around a repeated field of values.
|
||||
//
|
||||
// The JSON representation for `ListValue` is JSON array.
|
||||
message ListValue {
|
||||
// Repeated field of dynamically typed values.
|
||||
repeated Value values = 1;
|
||||
}
111 vendor/github.com/gogo/protobuf/protobuf/google/protobuf/timestamp.proto generated vendored Normal file
@@ -0,0 +1,111 @@
// Protocol Buffers - Google's data interchange format
|
||||
// Copyright 2008 Google Inc. All rights reserved.
|
||||
// https://developers.google.com/protocol-buffers/
|
||||
//
|
||||
// Redistribution and use in source and binary forms, with or without
|
||||
// modification, are permitted provided that the following conditions are
|
||||
// met:
|
||||
//
|
||||
// * Redistributions of source code must retain the above copyright
|
||||
// notice, this list of conditions and the following disclaimer.
|
||||
// * Redistributions in binary form must reproduce the above
|
||||
// copyright notice, this list of conditions and the following disclaimer
|
||||
// in the documentation and/or other materials provided with the
|
||||
// distribution.
|
||||
// * Neither the name of Google Inc. nor the names of its
|
||||
// contributors may be used to endorse or promote products derived from
|
||||
// this software without specific prior written permission.
|
||||
//
|
||||
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
|
||||
syntax = "proto3";
|
||||
|
||||
package google.protobuf;
|
||||
|
||||
option csharp_namespace = "Google.Protobuf.WellKnownTypes";
|
||||
option cc_enable_arenas = true;
|
||||
option go_package = "types";
|
||||
option java_package = "com.google.protobuf";
|
||||
option java_outer_classname = "TimestampProto";
|
||||
option java_multiple_files = true;
|
||||
option java_generate_equals_and_hash = true;
|
||||
option objc_class_prefix = "GPB";
|
||||
|
||||
// A Timestamp represents a point in time independent of any time zone
|
||||
// or calendar, represented as seconds and fractions of seconds at
|
||||
// nanosecond resolution in UTC Epoch time. It is encoded using the
|
||||
// Proleptic Gregorian Calendar which extends the Gregorian calendar
|
||||
// backwards to year one. It is encoded assuming all minutes are 60
|
||||
// seconds long, i.e. leap seconds are "smeared" so that no leap second
|
||||
// table is needed for interpretation. Range is from
|
||||
// 0001-01-01T00:00:00Z to 9999-12-31T23:59:59.999999999Z.
|
||||
// By restricting to that range, we ensure that we can convert to
|
||||
// and from RFC 3339 date strings.
|
||||
// See [https://www.ietf.org/rfc/rfc3339.txt](https://www.ietf.org/rfc/rfc3339.txt).
|
||||
//
|
||||
// Example 1: Compute Timestamp from POSIX `time()`.
|
||||
//
|
||||
// Timestamp timestamp;
|
||||
// timestamp.set_seconds(time(NULL));
|
||||
// timestamp.set_nanos(0);
|
||||
//
|
||||
// Example 2: Compute Timestamp from POSIX `gettimeofday()`.
|
||||
//
|
||||
// struct timeval tv;
|
||||
// gettimeofday(&tv, NULL);
|
||||
//
|
||||
// Timestamp timestamp;
|
||||
// timestamp.set_seconds(tv.tv_sec);
|
||||
// timestamp.set_nanos(tv.tv_usec * 1000);
|
||||
//
|
||||
// Example 3: Compute Timestamp from Win32 `GetSystemTimeAsFileTime()`.
|
||||
//
|
||||
// FILETIME ft;
|
||||
// GetSystemTimeAsFileTime(&ft);
|
||||
// UINT64 ticks = (((UINT64)ft.dwHighDateTime) << 32) | ft.dwLowDateTime;
|
||||
//
|
||||
// // A Windows tick is 100 nanoseconds. Windows epoch 1601-01-01T00:00:00Z
|
||||
// // is 11644473600 seconds before Unix epoch 1970-01-01T00:00:00Z.
|
||||
// Timestamp timestamp;
|
||||
// timestamp.set_seconds((INT64) ((ticks / 10000000) - 11644473600LL));
|
||||
// timestamp.set_nanos((INT32) ((ticks % 10000000) * 100));
|
||||
//
|
||||
// Example 4: Compute Timestamp from Java `System.currentTimeMillis()`.
|
||||
//
|
||||
// long millis = System.currentTimeMillis();
|
||||
//
|
||||
// Timestamp timestamp = Timestamp.newBuilder().setSeconds(millis / 1000)
|
||||
// .setNanos((int) ((millis % 1000) * 1000000)).build();
|
||||
//
|
||||
//
|
||||
// Example 5: Compute Timestamp from current time in Python.
|
||||
//
|
||||
// now = time.time()
|
||||
// seconds = int(now)
|
||||
// nanos = int((now - seconds) * 10**9)
|
||||
// timestamp = Timestamp(seconds=seconds, nanos=nanos)
|
||||
//
|
||||
//
|
||||
message Timestamp {
|
||||
|
||||
// Represents seconds of UTC time since Unix epoch
|
||||
// 1970-01-01T00:00:00Z. Must be from 0001-01-01T00:00:00Z to
|
||||
// 9999-12-31T23:59:59Z inclusive.
|
||||
int64 seconds = 1;
|
||||
|
||||
// Non-negative fractions of a second at nanosecond resolution. Negative
|
||||
// second values with fractions must still have non-negative nanos values
|
||||
// that count forward in time. Must be from 0 to 999,999,999
|
||||
// inclusive.
|
||||
int32 nanos = 2;
|
||||
}
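A Go analogue of Examples 4 and 5 above, using only the standard library to split the current time into the seconds/nanos pair a Timestamp expects. This is a sketch, not the vendored types package.

package main

import (
	"fmt"
	"time"
)

func main() {
	// Split the current time into whole seconds since the Unix epoch and the
	// remaining nanoseconds, matching the Timestamp seconds/nanos convention.
	now := time.Now()
	seconds := now.Unix()            // int64 seconds since 1970-01-01T00:00:00Z
	nanos := int32(now.Nanosecond()) // always in [0, 999999999]
	fmt.Println(seconds, nanos)
}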
119 vendor/github.com/gogo/protobuf/protobuf/google/protobuf/wrappers.proto generated vendored Normal file
@@ -0,0 +1,119 @@
// Protocol Buffers - Google's data interchange format
|
||||
// Copyright 2008 Google Inc. All rights reserved.
|
||||
// https://developers.google.com/protocol-buffers/
|
||||
//
|
||||
// Redistribution and use in source and binary forms, with or without
|
||||
// modification, are permitted provided that the following conditions are
|
||||
// met:
|
||||
//
|
||||
// * Redistributions of source code must retain the above copyright
|
||||
// notice, this list of conditions and the following disclaimer.
|
||||
// * Redistributions in binary form must reproduce the above
|
||||
// copyright notice, this list of conditions and the following disclaimer
|
||||
// in the documentation and/or other materials provided with the
|
||||
// distribution.
|
||||
// * Neither the name of Google Inc. nor the names of its
|
||||
// contributors may be used to endorse or promote products derived from
|
||||
// this software without specific prior written permission.
|
||||
//
|
||||
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
|
||||
// Wrappers for primitive (non-message) types. These types are useful
|
||||
// for embedding primitives in the `google.protobuf.Any` type and for places
|
||||
// where we need to distinguish between the absence of a primitive
|
||||
// typed field and its default value.
|
||||
|
||||
syntax = "proto3";
|
||||
|
||||
package google.protobuf;
|
||||
|
||||
option csharp_namespace = "Google.Protobuf.WellKnownTypes";
|
||||
option cc_enable_arenas = true;
|
||||
option go_package = "types";
|
||||
option java_package = "com.google.protobuf";
|
||||
option java_outer_classname = "WrappersProto";
|
||||
option java_multiple_files = true;
|
||||
option java_generate_equals_and_hash = true;
|
||||
option objc_class_prefix = "GPB";
|
||||
|
||||
// Wrapper message for `double`.
|
||||
//
|
||||
// The JSON representation for `DoubleValue` is JSON number.
|
||||
message DoubleValue {
|
||||
// The double value.
|
||||
double value = 1;
|
||||
}
|
||||
|
||||
// Wrapper message for `float`.
|
||||
//
|
||||
// The JSON representation for `FloatValue` is JSON number.
|
||||
message FloatValue {
|
||||
// The float value.
|
||||
float value = 1;
|
||||
}
|
||||
|
||||
// Wrapper message for `int64`.
|
||||
//
|
||||
// The JSON representation for `Int64Value` is JSON string.
|
||||
message Int64Value {
|
||||
// The int64 value.
|
||||
int64 value = 1;
|
||||
}
|
||||
|
||||
// Wrapper message for `uint64`.
|
||||
//
|
||||
// The JSON representation for `UInt64Value` is JSON string.
|
||||
message UInt64Value {
|
||||
// The uint64 value.
|
||||
uint64 value = 1;
|
||||
}
|
||||
|
||||
// Wrapper message for `int32`.
|
||||
//
|
||||
// The JSON representation for `Int32Value` is JSON number.
|
||||
message Int32Value {
|
||||
// The int32 value.
|
||||
int32 value = 1;
|
||||
}
|
||||
|
||||
// Wrapper message for `uint32`.
|
||||
//
|
||||
// The JSON representation for `UInt32Value` is JSON number.
|
||||
message UInt32Value {
|
||||
// The uint32 value.
|
||||
uint32 value = 1;
|
||||
}
|
||||
|
||||
// Wrapper message for `bool`.
|
||||
//
|
||||
// The JSON representation for `BoolValue` is JSON `true` and `false`.
|
||||
message BoolValue {
|
||||
// The bool value.
|
||||
bool value = 1;
|
||||
}
|
||||
|
||||
// Wrapper message for `string`.
|
||||
//
|
||||
// The JSON representation for `StringValue` is JSON string.
|
||||
message StringValue {
|
||||
// The string value.
|
||||
string value = 1;
|
||||
}
|
||||
|
||||
// Wrapper message for `bytes`.
|
||||
//
|
||||
// The JSON representation for `BytesValue` is JSON string.
|
||||
message BytesValue {
|
||||
// The bytes value.
|
||||
bytes value = 1;
|
||||
}
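To illustrate why these wrappers exist (distinguishing an unset field from its default value, as the file comment says), here is a minimal Go sketch in which a pointer plays the role of google.protobuf.Int32Value. Config and retriesOrDefault are hypothetical names used only for this example.

package main

import "fmt"

// Config uses *int32 the way an API would use google.protobuf.Int32Value:
// nil means "not specified", which is different from an explicit 0.
type Config struct {
	Retries *int32
}

func retriesOrDefault(c Config, def int32) int32 {
	if c.Retries == nil {
		return def
	}
	return *c.Retries
}

func main() {
	zero := int32(0)
	fmt.Println(retriesOrDefault(Config{}, 3))               // 3: field was absent
	fmt.Println(retriesOrDefault(Config{Retries: &zero}, 3)) // 0: explicitly set to zero
}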
36 vendor/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/Makefile generated vendored
@@ -1,36 +0,0 @@
# Go support for Protocol Buffers - Google's data interchange format
|
||||
#
|
||||
# Copyright 2010 The Go Authors. All rights reserved.
|
||||
# https://github.com/golang/protobuf
|
||||
#
|
||||
# Redistribution and use in source and binary forms, with or without
|
||||
# modification, are permitted provided that the following conditions are
|
||||
# met:
|
||||
#
|
||||
# * Redistributions of source code must retain the above copyright
|
||||
# notice, this list of conditions and the following disclaimer.
|
||||
# * Redistributions in binary form must reproduce the above
|
||||
# copyright notice, this list of conditions and the following disclaimer
|
||||
# in the documentation and/or other materials provided with the
|
||||
# distribution.
|
||||
# * Neither the name of Google Inc. nor the names of its
|
||||
# contributors may be used to endorse or promote products derived from
|
||||
# this software without specific prior written permission.
|
||||
#
|
||||
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
|
||||
regenerate:
|
||||
go install github.com/gogo/protobuf/protoc-gen-gogo
|
||||
go install github.com/gogo/protobuf/protoc-gen-gostring
|
||||
protoc --gogo_out=. -I=../../protobuf/google/protobuf ../../protobuf/google/protobuf/descriptor.proto
|
||||
protoc --gostring_out=. -I=../../protobuf/google/protobuf ../../protobuf/google/protobuf/descriptor.proto
39 vendor/github.com/gogo/protobuf/types/Makefile generated vendored
@@ -1,39 +0,0 @@
# Protocol Buffers for Go with Gadgets
|
||||
#
|
||||
# Copyright (c) 2016, The GoGo Authors. All rights reserved.
|
||||
# http://github.com/gogo/protobuf
|
||||
#
|
||||
# Redistribution and use in source and binary forms, with or without
|
||||
# modification, are permitted provided that the following conditions are
|
||||
# met:
|
||||
#
|
||||
# * Redistributions of source code must retain the above copyright
|
||||
# notice, this list of conditions and the following disclaimer.
|
||||
# * Redistributions in binary form must reproduce the above
|
||||
# copyright notice, this list of conditions and the following disclaimer
|
||||
# in the documentation and/or other materials provided with the
|
||||
# distribution.
|
||||
#
|
||||
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
|
||||
regenerate:
|
||||
go install github.com/gogo/protobuf/protoc-gen-gogotypes
|
||||
go install github.com/gogo/protobuf/protoc-min-version
|
||||
|
||||
protoc-min-version --version="3.0.0" --gogotypes_out=. -I=../protobuf/google/protobuf ../protobuf/google/protobuf/any.proto
|
||||
protoc-min-version --version="3.0.0" --gogotypes_out=. -I=../protobuf/google/protobuf ../protobuf/google/protobuf/empty.proto
|
||||
protoc-min-version --version="3.0.0" --gogotypes_out=. -I=../protobuf/google/protobuf ../protobuf/google/protobuf/timestamp.proto
|
||||
protoc-min-version --version="3.0.0" --gogotypes_out=. -I=../protobuf/google/protobuf ../protobuf/google/protobuf/duration.proto
|
||||
protoc-min-version --version="3.0.0" --gogotypes_out=. -I=../protobuf/google/protobuf ../protobuf/google/protobuf/struct.proto
|
||||
protoc-min-version --version="3.0.0" --gogotypes_out=. -I=../protobuf/google/protobuf ../protobuf/google/protobuf/wrappers.proto
|
||||
protoc-min-version --version="3.0.0" --gogotypes_out=. -I=../protobuf/google/protobuf ../protobuf/google/protobuf/field_mask.proto
12 vendor/github.com/golang/mock/AUTHORS generated vendored
@@ -1,12 +0,0 @@
# This is the official list of GoMock authors for copyright purposes.
|
||||
# This file is distinct from the CONTRIBUTORS files.
|
||||
# See the latter for an explanation.
|
||||
|
||||
# Names should be added to this file as
|
||||
# Name or Organization <email address>
|
||||
# The email address is not required for organizations.
|
||||
|
||||
# Please keep the list sorted.
|
||||
|
||||
Alex Reece <awreece@gmail.com>
|
||||
Google Inc.
37 vendor/github.com/golang/mock/CONTRIBUTORS generated vendored
@@ -1,37 +0,0 @@
# This is the official list of people who can contribute (and typically
|
||||
# have contributed) code to the gomock repository.
|
||||
# The AUTHORS file lists the copyright holders; this file
|
||||
# lists people. For example, Google employees are listed here
|
||||
# but not in AUTHORS, because Google holds the copyright.
|
||||
#
|
||||
# The submission process automatically checks to make sure
|
||||
# that people submitting code are listed in this file (by email address).
|
||||
#
|
||||
# Names should be added to this file only after verifying that
|
||||
# the individual or the individual's organization has agreed to
|
||||
# the appropriate Contributor License Agreement, found here:
|
||||
#
|
||||
# http://code.google.com/legal/individual-cla-v1.0.html
|
||||
# http://code.google.com/legal/corporate-cla-v1.0.html
|
||||
#
|
||||
# The agreement for individuals can be filled out on the web.
|
||||
#
|
||||
# When adding J Random Contributor's name to this file,
|
||||
# either J's name or J's organization's name should be
|
||||
# added to the AUTHORS file, depending on whether the
|
||||
# individual or corporate CLA was used.
|
||||
|
||||
# Names should be added to this file like so:
|
||||
# Name <email address>
|
||||
#
|
||||
# An entry with two email addresses specifies that the
|
||||
# first address should be used in the submit logs and
|
||||
# that the second address should be recognized as the
|
||||
# same person when interacting with Rietveld.
|
||||
|
||||
# Please keep the list sorted.
|
||||
|
||||
Aaron Jacobs <jacobsa@google.com> <aaronjjacobs@gmail.com>
|
||||
Alex Reece <awreece@gmail.com>
|
||||
David Symonds <dsymonds@golang.org>
|
||||
Ryan Barrett <ryanb@google.com>
81 vendor/github.com/golang/mock/README.md generated vendored Normal file
@@ -0,0 +1,81 @@
GoMock is a mocking framework for the [Go programming language][golang]. It
|
||||
integrates well with Go's built-in `testing` package, but can be used in other
|
||||
contexts too.
|
||||
|
||||
|
||||
Installation
|
||||
------------
|
||||
|
||||
Once you have [installed Go][golang-install], run these commands
|
||||
to install the `gomock` package and the `mockgen` tool:
|
||||
|
||||
go get github.com/golang/mock/gomock
|
||||
go get github.com/golang/mock/mockgen
|
||||
|
||||
|
||||
Documentation
|
||||
-------------
|
||||
|
||||
After installing, you can use `go doc` to get documentation:
|
||||
|
||||
go doc github.com/golang/mock/gomock
|
||||
|
||||
Alternatively, there is an online reference for the package hosted on GoPkgDoc
|
||||
[here][gomock-ref].
|
||||
|
||||
|
||||
Running mockgen
|
||||
---------------
|
||||
|
||||
`mockgen` has two modes of operation: source and reflect.
|
||||
Source mode generates mock interfaces from a source file.
|
||||
It is enabled by using the -source flag. Other flags that
|
||||
may be useful in this mode are -imports and -aux_files.
|
||||
|
||||
Example:
|
||||
|
||||
mockgen -source=foo.go [other options]
|
||||
|
||||
Reflect mode generates mock interfaces by building a program
|
||||
that uses reflection to understand interfaces. It is enabled
|
||||
by passing two non-flag arguments: an import path, and a
|
||||
comma-separated list of symbols.
|
||||
|
||||
Example:
|
||||
|
||||
mockgen database/sql/driver Conn,Driver
|
||||
|
||||
The `mockgen` command is used to generate source code for a mock
|
||||
class given a Go source file containing interfaces to be mocked.
|
||||
It supports the following flags:
|
||||
|
||||
* `-source`: A file containing interfaces to be mocked.
|
||||
|
||||
* `-destination`: A file to which to write the resulting source code. If you
|
||||
don't set this, the code is printed to standard output.
|
||||
|
||||
* `-package`: The package to use for the resulting mock class
|
||||
source code. If you don't set this, the package name is `mock_` concatenated
|
||||
with the package of the input file.
|
||||
|
||||
* `-imports`: A list of explicit imports that should be used in the resulting
|
||||
source code, specified as a comma-separated list of elements of the form
|
||||
`foo=bar/baz`, where `bar/baz` is the package being imported and `foo` is
|
||||
the identifier to use for the package in the generated source code.
|
||||
|
||||
* `-aux_files`: A list of additional files that should be consulted to
|
||||
resolve e.g. embedded interfaces defined in a different file. This is
|
||||
specified as a comma-separated list of elements of the form
|
||||
`foo=bar/baz.go`, where `bar/baz.go` is the source file and `foo` is the
|
||||
package name of that file used by the -source file.
|
||||
|
||||
For an example of the use of `mockgen`, see the `sample/` directory. In simple
|
||||
cases, you will need only the `-source` flag.
|
||||
|
||||
|
||||
TODO: Brief overview of how to create mock objects and set up expectations, and
|
||||
an example.
|
||||
|
||||
[golang]: http://golang.org/
|
||||
[golang-install]: http://golang.org/doc/install.html#releases
|
||||
[gomock-ref]: http://godoc.org/github.com/golang/mock/gomock
3 vendor/github.com/golang/protobuf/AUTHORS generated vendored
@@ -1,3 +0,0 @@
# This source code refers to The Go Authors for copyright purposes.
|
||||
# The master list of authors is in the main Go distribution,
|
||||
# visible at http://tip.golang.org/AUTHORS.
3 vendor/github.com/golang/protobuf/CONTRIBUTORS generated vendored
@@ -1,3 +0,0 @@
# This source code was written by the Go contributors.
|
||||
# The master list of contributors is in the main Go distribution,
|
||||
# visible at http://tip.golang.org/CONTRIBUTORS.
|
240
vendor/github.com/golang/protobuf/README.md
generated
vendored
Normal file
240
vendor/github.com/golang/protobuf/README.md
generated
vendored
Normal file
@ -0,0 +1,240 @@
|
||||
# Go support for Protocol Buffers
|
||||
|
||||
Google's data interchange format.
|
||||
Copyright 2010 The Go Authors.
|
||||
https://github.com/golang/protobuf
|
||||
|
||||
This package and the code it generates requires at least Go 1.4.
|
||||
|
||||
This software implements Go bindings for protocol buffers. For
|
||||
information about protocol buffers themselves, see
|
||||
https://developers.google.com/protocol-buffers/
|
||||
|
||||
## Installation ##
|
||||
|
||||
To use this software, you must:
|
||||
- Install the standard C++ implementation of protocol buffers from
|
||||
https://developers.google.com/protocol-buffers/
|
||||
- Of course, install the Go compiler and tools from
|
||||
https://golang.org/
|
||||
See
|
||||
https://golang.org/doc/install
|
||||
for details or, if you are using gccgo, follow the instructions at
|
||||
https://golang.org/doc/install/gccgo
|
||||
- Grab the code from the repository and install the proto package.
|
||||
The simplest way is to run `go get -u github.com/golang/protobuf/protoc-gen-go`.
|
||||
The compiler plugin, protoc-gen-go, will be installed in $GOBIN,
|
||||
defaulting to $GOPATH/bin. It must be in your $PATH for the protocol
|
||||
compiler, protoc, to find it.
|
||||
|
||||
This software has two parts: a 'protocol compiler plugin' that
|
||||
generates Go source files that, once compiled, can access and manage
|
||||
protocol buffers; and a library that implements run-time support for
|
||||
encoding (marshaling), decoding (unmarshaling), and accessing protocol
|
||||
buffers.
|
||||
|
||||
There is support for gRPC in Go using protocol buffers.
|
||||
See the note at the bottom of this file for details.
|
||||
|
||||
There are no insertion points in the plugin.
|
||||
|
||||
|
||||
## Using protocol buffers with Go ##
|
||||
|
||||
Once the software is installed, there are two steps to using it.
|
||||
First you must compile the protocol buffer definitions and then import
|
||||
them, with the support library, into your program.
|
||||
|
||||
To compile the protocol buffer definition, run protoc with the --go_out
|
||||
parameter set to the directory you want to output the Go code to.
|
||||
|
||||
protoc --go_out=. *.proto
|
||||
|
||||
The generated files will be suffixed .pb.go. See the Test code below
|
||||
for an example using such a file.
|
||||
|
||||
|
||||
The package comment for the proto library contains text describing
|
||||
the interface provided in Go for protocol buffers. Here is an edited
|
||||
version.
|
||||
|
||||
==========
|
||||
|
||||
The proto package converts data structures to and from the
|
||||
wire format of protocol buffers. It works in concert with the
|
||||
Go source code generated for .proto files by the protocol compiler.
|
||||
|
||||
A summary of the properties of the protocol buffer interface
|
||||
for a protocol buffer variable v:
|
||||
|
||||
- Names are turned from camel_case to CamelCase for export.
|
||||
- There are no methods on v to set fields; just treat
|
||||
them as structure fields.
|
||||
- There are getters that return a field's value if set,
|
||||
and return the field's default value if unset.
|
||||
The getters work even if the receiver is a nil message.
|
||||
- The zero value for a struct is its correct initialization state.
|
||||
All desired fields must be set before marshaling.
|
||||
- A Reset() method will restore a protobuf struct to its zero state.
|
||||
- Non-repeated fields are pointers to the values; nil means unset.
|
||||
That is, optional or required field int32 f becomes F *int32.
|
||||
- Repeated fields are slices.
|
||||
- Helper functions are available to aid the setting of fields.
|
||||
Helpers for getting values are superseded by the
|
||||
GetFoo methods and their use is deprecated.
|
||||
msg.Foo = proto.String("hello") // set field
|
||||
- Constants are defined to hold the default values of all fields that
|
||||
have them. They have the form Default_StructName_FieldName.
|
||||
Because the getter methods handle defaulted values,
|
||||
direct use of these constants should be rare.
|
||||
- Enums are given type names and maps from names to values.
|
||||
Enum values are prefixed with the enum's type name. Enum types have
|
||||
a String method, and an Enum method to assist in message construction.
|
||||
- Nested groups and enums have type names prefixed with the name of
|
||||
the surrounding message type.
|
||||
- Extensions are given descriptor names that start with E_,
|
||||
followed by an underscore-delimited list of the nested messages
|
||||
that contain it (if any) followed by the CamelCased name of the
|
||||
extension field itself. HasExtension, ClearExtension, GetExtension
|
||||
and SetExtension are functions for manipulating extensions.
|
||||
- Oneof field sets are given a single field in their message,
|
||||
with distinguished wrapper types for each possible field value.
|
||||
- Marshal and Unmarshal are functions to encode and decode the wire format.
|
||||
|
||||
When the .proto file specifies `syntax="proto3"`, there are some differences:
|
||||
|
||||
- Non-repeated fields of non-message type are values instead of pointers.
|
||||
- Enum types do not get an Enum method.
|
||||
|
||||
Consider file test.proto, containing
|
||||
|
||||
```proto
|
||||
package example;
|
||||
|
||||
enum FOO { X = 17; };
|
||||
|
||||
message Test {
|
||||
required string label = 1;
|
||||
optional int32 type = 2 [default=77];
|
||||
repeated int64 reps = 3;
|
||||
optional group OptionalGroup = 4 {
|
||||
required string RequiredField = 5;
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
To create and play with a Test object from the example package,
|
||||
|
||||
```go
|
||||
package main
|
||||
|
||||
import (
|
||||
"log"
|
||||
|
||||
"github.com/golang/protobuf/proto"
|
||||
"path/to/example"
|
||||
)
|
||||
|
||||
func main() {
|
||||
test := &example.Test {
|
||||
Label: proto.String("hello"),
|
||||
Type: proto.Int32(17),
|
||||
Reps: []int64{1, 2, 3},
|
||||
Optionalgroup: &example.Test_OptionalGroup {
|
||||
RequiredField: proto.String("good bye"),
|
||||
},
|
||||
}
|
||||
data, err := proto.Marshal(test)
|
||||
if err != nil {
|
||||
log.Fatal("marshaling error: ", err)
|
||||
}
|
||||
newTest := &example.Test{}
|
||||
err = proto.Unmarshal(data, newTest)
|
||||
if err != nil {
|
||||
log.Fatal("unmarshaling error: ", err)
|
||||
}
|
||||
// Now test and newTest contain the same data.
|
||||
if test.GetLabel() != newTest.GetLabel() {
|
||||
log.Fatalf("data mismatch %q != %q", test.GetLabel(), newTest.GetLabel())
|
||||
}
|
||||
// etc.
|
||||
}
|
||||
```
|
||||
|
||||
## Parameters ##
|
||||
|
||||
To pass extra parameters to the plugin, use a comma-separated
|
||||
parameter list separated from the output directory by a colon:
|
||||
|
||||
|
||||
protoc --go_out=plugins=grpc,import_path=mypackage:. *.proto
|
||||
|
||||
|
||||
- `import_prefix=xxx` - a prefix that is added onto the beginning of
|
||||
all imports. Useful for things like generating protos in a
|
||||
subdirectory, or regenerating vendored protobufs in-place.
|
||||
- `import_path=foo/bar` - used as the package if no input files
|
||||
declare `go_package`. If it contains slashes, everything up to the
|
||||
rightmost slash is ignored.
|
||||
- `plugins=plugin1+plugin2` - specifies the list of sub-plugins to
|
||||
load. The only plugin in this repo is `grpc`.
|
||||
- `Mfoo/bar.proto=quux/shme` - declares that foo/bar.proto is
|
||||
associated with Go package quux/shme. This is subject to the
|
||||
import_prefix parameter.
|
||||
|
||||
## gRPC Support ##
|
||||
|
||||
If a proto file specifies RPC services, protoc-gen-go can be instructed to
|
||||
generate code compatible with gRPC (http://www.grpc.io/). To do this, pass
|
||||
the `plugins` parameter to protoc-gen-go; the usual way is to insert it into
|
||||
the --go_out argument to protoc:
|
||||
|
||||
protoc --go_out=plugins=grpc:. *.proto
|
||||
|
||||
## Compatibility ##
|
||||
|
||||
The library and the generated code are expected to be stable over time.
|
||||
However, we reserve the right to make breaking changes without notice for the
|
||||
following reasons:
|
||||
|
||||
- Security. A security issue in the specification or implementation may come to
|
||||
light whose resolution requires breaking compatibility. We reserve the right
|
||||
to address such security issues.
|
||||
- Unspecified behavior. There are some aspects of the Protocol Buffers
|
||||
specification that are undefined. Programs that depend on such unspecified
|
||||
behavior may break in future releases.
|
||||
- Specification errors or changes. If it becomes necessary to address an
|
||||
inconsistency, incompleteness, or change in the Protocol Buffers
|
||||
specification, resolving the issue could affect the meaning or legality of
|
||||
existing programs. We reserve the right to address such issues, including
|
||||
updating the implementations.
|
||||
- Bugs. If the library has a bug that violates the specification, a program
|
||||
that depends on the buggy behavior may break if the bug is fixed. We reserve
|
||||
the right to fix such bugs.
|
||||
- Adding methods or fields to generated structs. These may conflict with field
|
||||
names that already exist in a schema, causing applications to break. When the
|
||||
code generator encounters a field in the schema that would collide with a
|
||||
generated field or method name, the code generator will append an underscore
|
||||
to the generated field or method name.
|
||||
- Adding, removing, or changing methods or fields in generated structs that
|
||||
start with `XXX`. These parts of the generated code are exported out of
|
||||
necessity, but should not be considered part of the public API.
|
||||
- Adding, removing, or changing unexported symbols in generated code.
|
||||
|
||||
Any breaking changes outside of these will be announced 6 months in advance to
|
||||
protobuf@googlegroups.com.
|
||||
|
||||
You should, whenever possible, use generated code created by the `protoc-gen-go`
|
||||
tool built at the same commit as the `proto` package. The `proto` package
|
||||
declares package-level constants in the form `ProtoPackageIsVersionX`.
|
||||
Application code and generated code may depend on one of these constants to
|
||||
ensure that compilation will fail if the available version of the proto library
|
||||
is too old. Whenever we make a change to the generated code that requires newer
|
||||
library support, in the same commit we will increment the version number of the
|
||||
generated code and declare a new package-level constant whose name incorporates
|
||||
the latest version number. Removing a compatibility constant is considered a
|
||||
breaking change and would be subject to the announcement policy stated above.
|
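As a small sketch (the package name is hypothetical, and this assumes the version-2 constant current at the time of writing), generated `.pb.go` files pin the minimum library version with a compile-time reference such as:

```go
package example_pb

import "github.com/golang/protobuf/proto"

// Fails to compile against a proto library older than the generator that produced this file.
const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package
```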
||||
|
||||
The `protoc-gen-go/generator` package exposes a plugin interface,
|
||||
which is used by the gRPC code generation. This interface is not
|
||||
supported and is subject to incompatible changes without notice.
|
43
vendor/github.com/golang/protobuf/proto/Makefile
generated
vendored
43
vendor/github.com/golang/protobuf/proto/Makefile
generated
vendored
@ -1,43 +0,0 @@
|
||||
# Go support for Protocol Buffers - Google's data interchange format
|
||||
#
|
||||
# Copyright 2010 The Go Authors. All rights reserved.
|
||||
# https://github.com/golang/protobuf
|
||||
#
|
||||
# Redistribution and use in source and binary forms, with or without
|
||||
# modification, are permitted provided that the following conditions are
|
||||
# met:
|
||||
#
|
||||
# * Redistributions of source code must retain the above copyright
|
||||
# notice, this list of conditions and the following disclaimer.
|
||||
# * Redistributions in binary form must reproduce the above
|
||||
# copyright notice, this list of conditions and the following disclaimer
|
||||
# in the documentation and/or other materials provided with the
|
||||
# distribution.
|
||||
# * Neither the name of Google Inc. nor the names of its
|
||||
# contributors may be used to endorse or promote products derived from
|
||||
# this software without specific prior written permission.
|
||||
#
|
||||
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
|
||||
install:
|
||||
go install
|
||||
|
||||
test: install generate-test-pbs
|
||||
go test
|
||||
|
||||
|
||||
generate-test-pbs:
|
||||
make install
|
||||
make -C testdata
|
||||
protoc --go_out=Mtestdata/test.proto=github.com/golang/protobuf/proto/testdata,Mgoogle/protobuf/any.proto=github.com/golang/protobuf/ptypes/any:. proto3_proto/proto3.proto
|
||||
make
|
185
vendor/github.com/kubernetes-incubator/cri-o/README.md
generated
vendored
Normal file
185
vendor/github.com/kubernetes-incubator/cri-o/README.md
generated
vendored
Normal file
@ -0,0 +1,185 @@
|
||||

|
||||
# cri-o - OCI-based implementation of Kubernetes Container Runtime Interface
|
||||
|
||||
[](https://travis-ci.org/kubernetes-incubator/cri-o)
|
||||
[](https://goreportcard.com/report/github.com/kubernetes-incubator/cri-o)
|
||||
|
||||
### Status: pre-alpha
|
||||
|
||||
## What is the scope of this project?
|
||||
|
||||
cri-o is meant to provide an integration path between OCI conformant runtimes and the kubelet.
|
||||
Specifically, it implements the Kubelet Container Runtime Interface (CRI) using OCI conformant runtimes.
|
||||
The scope of cri-o is tied to the scope of the CRI.
|
||||
|
||||
At a high level, we expect the scope of cri-o to be restricted to the following functionalities:
|
||||
|
||||
* Support multiple image formats including the existing Docker image format
|
||||
* Support for multiple means to download images including trust & image verification
|
||||
* Container image management (managing image layers, overlay filesystems, etc)
|
||||
* Container process lifecycle management
|
||||
* Monitoring and logging required to satisfy the CRI
|
||||
* Resource isolation as required by the CRI
|
||||
|
||||
## What is not in scope for this project?
|
||||
|
||||
* Building, signing and pushing images to various image storages
|
||||
* A CLI utility for interacting with cri-o. Any CLIs built as part of this project are only meant for testing this project and there will be no guarantees on the backwards compatibility with it.
|
||||
|
||||
This is an implementation of the Kubernetes Container Runtime Interface (CRI) that will allow Kubernetes to directly launch and manage Open Container Initiative (OCI) containers.
|
||||
|
||||
The plan is to use OCI projects and best of breed libraries for different aspects:
|
||||
- Runtime: [runc](https://github.com/opencontainers/runc) (or any OCI runtime-spec implementation) and [oci runtime tools](https://github.com/opencontainers/runtime-tools)
|
||||
- Images: Image management using [containers/image](https://github.com/containers/image)
|
||||
- Storage: Storage and management of image layers using [containers/storage](https://github.com/containers/storage)
|
||||
- Networking: Networking support through use of [CNI](https://github.com/containernetworking/cni)
|
||||
|
||||
It is currently in active development in the Kubernetes community through the [design proposal](https://github.com/kubernetes/kubernetes/pull/26788). Questions and issues should be raised in the Kubernetes [sig-node Slack channel](https://kubernetes.slack.com/archives/sig-node).
|
||||
|
||||
## Getting started
|
||||
|
||||
### Prerequisites
|
||||
|
||||
`runc` version 1.0.0.rc1 or greater is expected to be installed on the system. It is picked up as the default runtime by ocid.
|
||||
|
||||
### Build Dependencies
|
||||
|
||||
**Required**
|
||||
|
||||
Fedora, CentOS, RHEL, and related distributions:
|
||||
|
||||
```bash
|
||||
yum install -y \
|
||||
btrfs-progs-devel \
|
||||
device-mapper-devel \
|
||||
glib2-devel \
|
||||
glibc-devel \
|
||||
glibc-static \
|
||||
gpgme-devel \
|
||||
libassuan-devel \
|
||||
libgpg-error-devel \
|
||||
libseccomp-devel \
|
||||
libselinux-devel \
|
||||
pkgconfig \
|
||||
runc
|
||||
```
|
||||
|
||||
Debian, Ubuntu, and related distributions:
|
||||
|
||||
```bash
|
||||
apt install -y \
|
||||
btrfs-tools \
|
||||
libassuan-dev \
|
||||
libdevmapper-dev \
|
||||
libglib2.0-dev \
|
||||
libc6-dev \
|
||||
libgpgme11-dev \
|
||||
libgpg-error-dev \
|
||||
libseccomp-dev \
|
||||
libselinux1-dev \
|
||||
pkg-config \
|
||||
runc
|
||||
```
|
||||
|
||||
If using an older release or a long-term support release, be careful to double-check that the version of `runc` is new enough, or else build your own.
|
||||
|
||||
**Optional**
|
||||
|
||||
Fedora, CentOS, RHEL, and related distributions:
|
||||
|
||||
(no optional packages)
|
||||
|
||||
Debian, Ubuntu, and related distributions:
|
||||
|
||||
```bash
|
||||
apt install -y \
|
||||
libapparmor-dev
|
||||
```
|
||||
|
||||
### Get Source Code
|
||||
|
||||
As with other Go projects, cri-o must be cloned into a directory structure like:
|
||||
|
||||
```
|
||||
GOPATH
|
||||
└── src
|
||||
└── github.com
|
||||
└── kubernetes-incubator
|
||||
└── cri-o
|
||||
```
|
||||
|
||||
First, configure a `GOPATH` (if you are using go1.8 or later, this defaults to `~/go`).
|
||||
|
||||
```bash
|
||||
export GOPATH=~/go
|
||||
mkdir -p $GOPATH
|
||||
```
|
||||
|
||||
Next, clone the source code using:
|
||||
|
||||
```bash
|
||||
mkdir -p $GOPATH/src/github.com/kubernetes-incubator
|
||||
cd $_ # or cd $GOPATH/src/github.com/kubernetes-incubator
|
||||
git clone https://github.com/kubernetes-incubator/cri-o # or your fork
|
||||
cd cri-o
|
||||
```
|
||||
|
||||
### Build
|
||||
|
||||
```bash
|
||||
make install.tools
|
||||
make
|
||||
sudo make install
|
||||
```
|
||||
|
||||
If you do not want to build `cri-o` with seccomp support, you can add `BUILDTAGS=""` when running make.
|
||||
|
||||
```bash
|
||||
make BUILDTAGS=""
|
||||
sudo make install
|
||||
```
|
||||
|
||||
#### Build Tags
|
||||
|
||||
`cri-o` supports optional build tags for compiling support of various features.
|
||||
To add build tags, set the `BUILDTAGS` variable when invoking make.
|
||||
|
||||
```bash
|
||||
make BUILDTAGS='seccomp apparmor'
|
||||
```
|
||||
|
||||
| Build Tag | Feature | Dependency |
|
||||
|-----------|------------------------------------|-------------|
|
||||
| seccomp | syscall filtering | libseccomp |
|
||||
| selinux | selinux process and mount labeling | libselinux |
|
||||
| apparmor | apparmor profile support | libapparmor |
|
||||
|
||||
### Running pods and containers
|
||||
|
||||
Follow this [tutorial](tutorial.md) to get started with CRI-O.
|
||||
|
||||
### Setup CNI networking
|
||||
|
||||
A proper description of setting up CNI networking is given in the
|
||||
[`contrib/cni` README](contrib/cni/README.md). But the gist is that you need to
|
||||
have some basic network configurations enabled and CNI plugins installed on
|
||||
your system.
|
||||
|
||||
### Running with kubernetes
|
||||
|
||||
You can run a local version of kubernetes with cri-o using `local-up-cluster.sh`:
|
||||
|
||||
1. Clone the [kubernetes repository](https://github.com/kubernetes/kubernetes)
|
||||
1. Start the cri-o daemon (`ocid`)
|
||||
1. From the kubernetes project directory, run: `CONTAINER_RUNTIME=remote CONTAINER_RUNTIME_ENDPOINT='/var/run/ocid.sock --runtime-request-timeout=15m' ./hack/local-up-cluster.sh`
|
||||
|
||||
To run a full cluster, see [the instructions](kubernetes.md).
|
||||
|
||||
### Current Roadmap
|
||||
|
||||
1. Basic pod/container lifecycle, basic image pull (already works)
|
||||
1. Support for tty handling and state management
|
||||
1. Basic integration with kubelet once client side changes are ready
|
||||
1. Support for log management, networking integration using CNI, pluggable image/storage management
|
||||
1. Support for exec/attach
|
||||
1. Target fully automated kubernetes testing without failures
|
149
vendor/github.com/kubernetes-incubator/cri-o/conmon/cmsg.c
generated
vendored
Normal file
149
vendor/github.com/kubernetes-incubator/cri-o/conmon/cmsg.c
generated
vendored
Normal file
@ -0,0 +1,149 @@
|
||||
/*
|
||||
* Copyright 2016 SUSE LLC
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
/* NOTE: This code comes directly from runc/libcontainer/utils/cmsg.c. */
|
||||
|
||||
#include <errno.h>
|
||||
#include <stdio.h>
|
||||
#include <stdlib.h>
|
||||
#include <string.h>
|
||||
#include <sys/socket.h>
|
||||
#include <sys/types.h>
|
||||
#include <unistd.h>
|
||||
|
||||
#include "cmsg.h"
|
||||
|
||||
#define error(fmt, ...) \
|
||||
({ \
|
||||
fprintf(stderr, "nsenter: " fmt ": %m\n", ##__VA_ARGS__); \
|
||||
errno = ECOMM; \
|
||||
goto err; /* return value */ \
|
||||
})
|
||||
|
||||
/*
|
||||
* Sends a file descriptor along the sockfd provided. Returns the return
|
||||
* value of sendmsg(2). Any synchronisation and preparation of state
|
||||
* should be done external to this (we expect the other side to be in
|
||||
* recvfd() in the code).
|
||||
*/
|
||||
ssize_t sendfd(int sockfd, struct file_t file)
|
||||
{
|
||||
struct msghdr msg = {0};
|
||||
struct iovec iov[1] = {0};
|
||||
struct cmsghdr *cmsg;
|
||||
int *fdptr;
|
||||
|
||||
union {
|
||||
char buf[CMSG_SPACE(sizeof(file.fd))];
|
||||
struct cmsghdr align;
|
||||
} u;
|
||||
|
||||
/*
|
||||
* We need to send some other data along with the ancillary data,
|
||||
* otherwise the other side won't receive any data. This is very
|
||||
* well-hidden in the documentation (and only applies to
|
||||
* SOCK_STREAM). See the bottom part of unix(7).
|
||||
*/
|
||||
iov[0].iov_base = file.name;
|
||||
iov[0].iov_len = strlen(file.name) + 1;
|
||||
|
||||
msg.msg_name = NULL;
|
||||
msg.msg_namelen = 0;
|
||||
msg.msg_iov = iov;
|
||||
msg.msg_iovlen = 1;
|
||||
msg.msg_control = u.buf;
|
||||
msg.msg_controllen = sizeof(u.buf);
|
||||
|
||||
cmsg = CMSG_FIRSTHDR(&msg);
|
||||
cmsg->cmsg_level = SOL_SOCKET;
|
||||
cmsg->cmsg_type = SCM_RIGHTS;
|
||||
cmsg->cmsg_len = CMSG_LEN(sizeof(int));
|
||||
|
||||
fdptr = (int *) CMSG_DATA(cmsg);
|
||||
memcpy(fdptr, &file.fd, sizeof(int));
|
||||
|
||||
return sendmsg(sockfd, &msg, 0);
|
||||
}
|
||||
|
||||
/*
|
||||
* Receives a file descriptor from the sockfd provided. Returns the file
|
||||
* descriptor as sent from sendfd(). It will return the file descriptor
|
||||
* or die (literally) trying. Any synchronisation and preparation of
|
||||
* state should be done external to this (we expect the other side to be
|
||||
* in sendfd() in the code).
|
||||
*/
|
||||
struct file_t recvfd(int sockfd)
|
||||
{
|
||||
struct msghdr msg = {0};
|
||||
struct iovec iov[1] = {0};
|
||||
struct cmsghdr *cmsg;
|
||||
struct file_t file = {0};
|
||||
int *fdptr;
|
||||
int olderrno;
|
||||
|
||||
union {
|
||||
char buf[CMSG_SPACE(sizeof(file.fd))];
|
||||
struct cmsghdr align;
|
||||
} u;
|
||||
|
||||
/* Allocate a buffer. */
|
||||
/* TODO: Make this dynamic with MSG_PEEK. */
|
||||
file.name = malloc(TAG_BUFFER);
|
||||
if (!file.name)
|
||||
error("recvfd: failed to allocate file.tag buffer\n");
|
||||
|
||||
/*
|
||||
* We need to "recieve" the non-ancillary data even though we don't
|
||||
* plan to use it at all. Otherwise, things won't work as expected.
|
||||
* See unix(7) and other well-hidden documentation.
|
||||
*/
|
||||
iov[0].iov_base = file.name;
|
||||
iov[0].iov_len = TAG_BUFFER;
|
||||
|
||||
msg.msg_name = NULL;
|
||||
msg.msg_namelen = 0;
|
||||
msg.msg_iov = iov;
|
||||
msg.msg_iovlen = 1;
|
||||
msg.msg_control = u.buf;
|
||||
msg.msg_controllen = sizeof(u.buf);
|
||||
|
||||
ssize_t ret = recvmsg(sockfd, &msg, 0);
|
||||
if (ret < 0)
|
||||
goto err;
|
||||
|
||||
cmsg = CMSG_FIRSTHDR(&msg);
|
||||
if (!cmsg)
|
||||
error("recvfd: got NULL from CMSG_FIRSTHDR");
|
||||
if (cmsg->cmsg_level != SOL_SOCKET)
|
||||
error("recvfd: expected SOL_SOCKET in cmsg: %d", cmsg->cmsg_level);
|
||||
if (cmsg->cmsg_type != SCM_RIGHTS)
|
||||
error("recvfd: expected SCM_RIGHTS in cmsg: %d", cmsg->cmsg_type);
|
||||
if (cmsg->cmsg_len != CMSG_LEN(sizeof(int)))
|
||||
error("recvfd: expected correct CMSG_LEN in cmsg: %lu", cmsg->cmsg_len);
|
||||
|
||||
fdptr = (int *) CMSG_DATA(cmsg);
|
||||
if (!fdptr || *fdptr < 0)
|
||||
error("recvfd: recieved invalid pointer");
|
||||
|
||||
file.fd = *fdptr;
|
||||
return file;
|
||||
|
||||
err:
|
||||
olderrno = errno;
|
||||
free(file.name);
|
||||
errno = olderrno;
|
||||
return (struct file_t){0};
|
||||
}
|
38
vendor/github.com/kubernetes-incubator/cri-o/conmon/cmsg.h
generated
vendored
Normal file
38
vendor/github.com/kubernetes-incubator/cri-o/conmon/cmsg.h
generated
vendored
Normal file
@ -0,0 +1,38 @@
|
||||
/*
|
||||
* Copyright 2016 SUSE LLC
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
/* NOTE: This code comes directly from runc/libcontainer/utils/cmsg.h. */
|
||||
|
||||
#pragma once
|
||||
|
||||
#if !defined(CMSG_H)
|
||||
#define CMSG_H
|
||||
|
||||
#include <sys/types.h>
|
||||
|
||||
/* TODO: Implement this properly with MSG_PEEK. */
|
||||
#define TAG_BUFFER 4096
|
||||
|
||||
/* This mirrors Go's (*os.File). */
|
||||
struct file_t {
|
||||
char *name;
|
||||
int fd;
|
||||
};
|
||||
|
||||
struct file_t recvfd(int sockfd);
|
||||
ssize_t sendfd(int sockfd, struct file_t file);
|
||||
|
||||
#endif /* !defined(CMSG_H) */
|
610
vendor/github.com/kubernetes-incubator/cri-o/conmon/conmon.c
generated
vendored
Normal file
610
vendor/github.com/kubernetes-incubator/cri-o/conmon/conmon.c
generated
vendored
Normal file
@ -0,0 +1,610 @@
|
||||
#define _GNU_SOURCE
|
||||
#include <ctype.h>
|
||||
#include <errno.h>
|
||||
#include <fcntl.h>
|
||||
#include <limits.h>
|
||||
#include <stdbool.h>
|
||||
#include <stdio.h>
|
||||
#include <stdlib.h>
|
||||
#include <string.h>
|
||||
#include <sys/epoll.h>
|
||||
#include <sys/prctl.h>
|
||||
#include <sys/socket.h>
|
||||
#include <sys/types.h>
|
||||
#include <sys/un.h>
|
||||
#include <sys/wait.h>
|
||||
#include <syslog.h>
|
||||
#include <unistd.h>
|
||||
|
||||
#include <glib.h>
|
||||
|
||||
#include "cmsg.h"
|
||||
|
||||
#define pexit(fmt, ...) \
|
||||
do { \
|
||||
fprintf(stderr, "[conmon:e]: " fmt " %m\n", ##__VA_ARGS__); \
|
||||
syslog(LOG_ERR, "conmon <error>: " fmt ": %m\n", ##__VA_ARGS__); \
|
||||
exit(EXIT_FAILURE); \
|
||||
} while (0)
|
||||
|
||||
#define nexit(fmt, ...) \
|
||||
do { \
|
||||
fprintf(stderr, "[conmon:e]: " fmt "\n", ##__VA_ARGS__); \
|
||||
syslog(LOG_ERR, "conmon <error>: " fmt " \n", ##__VA_ARGS__); \
|
||||
exit(EXIT_FAILURE); \
|
||||
} while (0)
|
||||
|
||||
#define nwarn(fmt, ...) \
|
||||
do { \
|
||||
fprintf(stderr, "[conmon:w]: " fmt "\n", ##__VA_ARGS__); \
|
||||
} while (0)
|
||||
|
||||
#define ninfo(fmt, ...) \
|
||||
do { \
|
||||
fprintf(stderr, "[conmon:i]: " fmt "\n", ##__VA_ARGS__); \
|
||||
} while (0)
|
||||
|
||||
#define _cleanup_(x) __attribute__((cleanup(x)))
|
||||
|
||||
static inline void freep(void *p)
|
||||
{
|
||||
free(*(void **)p);
|
||||
}
|
||||
|
||||
static inline void closep(int *fd)
|
||||
{
|
||||
if (*fd >= 0)
|
||||
close(*fd);
|
||||
*fd = -1;
|
||||
}
|
||||
|
||||
static inline void gstring_free_cleanup(GString **string)
|
||||
{
|
||||
if (*string)
|
||||
g_string_free(*string, TRUE);
|
||||
}
|
||||
|
||||
#define _cleanup_free_ _cleanup_(freep)
|
||||
#define _cleanup_close_ _cleanup_(closep)
|
||||
#define _cleanup_gstring_ _cleanup_(gstring_free_cleanup)
|
||||
|
||||
#define BUF_SIZE 256
|
||||
#define CMD_SIZE 1024
|
||||
#define MAX_EVENTS 10
|
||||
|
||||
static bool terminal = false;
|
||||
static char *cid = NULL;
|
||||
static char *runtime_path = NULL;
|
||||
static char *bundle_path = NULL;
|
||||
static char *pid_file = NULL;
|
||||
static bool systemd_cgroup = false;
|
||||
static bool exec = false;
|
||||
static char *log_path = NULL;
|
||||
static GOptionEntry entries[] =
|
||||
{
|
||||
{ "terminal", 't', 0, G_OPTION_ARG_NONE, &terminal, "Terminal", NULL },
|
||||
{ "cid", 'c', 0, G_OPTION_ARG_STRING, &cid, "Container ID", NULL },
|
||||
{ "runtime", 'r', 0, G_OPTION_ARG_STRING, &runtime_path, "Runtime path", NULL },
|
||||
{ "bundle", 'b', 0, G_OPTION_ARG_STRING, &bundle_path, "Bundle path", NULL },
|
||||
{ "pidfile", 'p', 0, G_OPTION_ARG_STRING, &pid_file, "PID file", NULL },
|
||||
{ "systemd-cgroup", 's', 0, G_OPTION_ARG_NONE, &systemd_cgroup, "Enable systemd cgroup manager", NULL },
|
||||
{ "exec", 'e', 0, G_OPTION_ARG_NONE, &exec, "Exec a command in a running container", NULL },
|
||||
{ "log-path", 'l', 0, G_OPTION_ARG_STRING, &log_path, "Log file path", NULL },
|
||||
{ NULL }
|
||||
};
|
||||
|
||||
/* strlen("1997-03-25T13:20:42.999999999+01:00") + 1 */
|
||||
#define TSBUFLEN 36
|
||||
|
||||
int set_k8s_timestamp(char *buf, ssize_t buflen)
|
||||
{
|
||||
struct tm *tm;
|
||||
struct timespec ts;
|
||||
char off_sign = '+';
|
||||
int off, len, err = -1;
|
||||
|
||||
if (clock_gettime(CLOCK_REALTIME, &ts) < 0) {
|
||||
/* If CLOCK_REALTIME is not supported, we set nanoseconds to 0 */
|
||||
if (errno == EINVAL) {
|
||||
ts.tv_nsec = 0;
|
||||
} else {
|
||||
return err;
|
||||
}
|
||||
}
|
||||
|
||||
if ((tm = localtime(&ts.tv_sec)) == NULL)
|
||||
return err;
|
||||
|
||||
|
||||
off = (int) tm->tm_gmtoff;
|
||||
if (tm->tm_gmtoff < 0) {
|
||||
off_sign = '-';
|
||||
off = -off;
|
||||
}
|
||||
|
||||
len = snprintf(buf, buflen, "%d-%02d-%02dT%02d:%02d:%02d.%09ld%c%02d:%02d",
|
||||
tm->tm_year + 1900, tm->tm_mon + 1, tm->tm_mday,
|
||||
tm->tm_hour, tm->tm_min, tm->tm_sec, ts.tv_nsec,
|
||||
off_sign, off / 3600, (off % 3600) / 60);
|
||||
|
||||
if (len < buflen)
|
||||
err = 0;
|
||||
return err;
|
||||
}
|
||||
|
||||
/* stdpipe_t represents one of the std pipes (or NONE). */
|
||||
typedef enum {
|
||||
NO_PIPE,
|
||||
STDIN_PIPE, /* unused */
|
||||
STDOUT_PIPE,
|
||||
STDERR_PIPE,
|
||||
} stdpipe_t;
|
||||
|
||||
const char *stdpipe_name(stdpipe_t pipe)
|
||||
{
|
||||
switch (pipe) {
|
||||
case STDIN_PIPE:
|
||||
return "stdin";
|
||||
case STDOUT_PIPE:
|
||||
return "stdout";
|
||||
case STDERR_PIPE:
|
||||
return "stderr";
|
||||
default:
|
||||
return "NONE";
|
||||
}
|
||||
}
|
||||
|
||||
/*
|
||||
* The CRI requires us to write logs with a (timestamp, stream, line) format
|
||||
* for every newline-separated line. write_k8s_log writes said format for every
|
||||
* line in buf, and will partially write the final line of the log if buf is
|
||||
* not terminated by a newline.
|
||||
*/
|
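/*
 * For illustration only: a line produced by this function looks roughly like
 *   2016-10-06T00:17:09.669794202+00:00 stdout some container output
 * i.e. the timestamp from set_k8s_timestamp, the stream name, then the raw
 * line contents.
 */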
||||
int write_k8s_log(int fd, stdpipe_t pipe, const char *buf, ssize_t buflen)
|
||||
{
|
||||
char tsbuf[TSBUFLEN];
|
||||
static stdpipe_t trailing_line = NO_PIPE;
|
||||
|
||||
/*
|
||||
* Use the same timestamp for every line of the log in this buffer.
|
||||
* There is no practical difference in the output since write(2) is
|
||||
* fast.
|
||||
*/
|
||||
if (set_k8s_timestamp(tsbuf, TSBUFLEN))
|
||||
/* TODO: We should handle failures much more cleanly than this. */
|
||||
return -1;
|
||||
|
||||
while (buflen > 0) {
|
||||
const char *line_end = NULL;
|
||||
ptrdiff_t line_len = 0;
|
||||
|
||||
/* Find the end of the line, or alternatively the end of the buffer. */
|
||||
line_end = memchr(buf, '\n', buflen);
|
||||
if (line_end == NULL)
|
||||
line_end = &buf[buflen-1];
|
||||
line_len = line_end - buf + 1;
|
||||
|
||||
/*
|
||||
* Write the (timestamp, stream) tuple if there isn't any trailing
|
||||
* output from the previous line (or if there is trailing output but
|
||||
* the current buffer being printed is from a different pipe).
|
||||
*/
|
||||
if (trailing_line != pipe) {
|
||||
/*
|
||||
* If there was a trailing line from a different pipe, prepend a
|
||||
* newline to split it properly. This technically breaks the flow
|
||||
* of the previous line (adding a newline in the log where there
|
||||
* wasn't one output) but without modifying the file in a
|
||||
* non-append-only way there's not much we can do.
|
||||
*/
|
||||
char *leading = "";
|
||||
if (trailing_line != NO_PIPE)
|
||||
leading = "\n";
|
||||
|
||||
if (dprintf(fd, "%s%s %s ", leading, tsbuf, stdpipe_name(pipe)) < 0) {
|
||||
nwarn("failed to write (timestamp, stream) to log");
|
||||
goto next;
|
||||
}
|
||||
}
|
||||
|
||||
/* Output the actual contents. */
|
||||
if (write(fd, buf, line_len) < 0) {
|
||||
nwarn("failed to write buffer to log");
|
||||
goto next;
|
||||
}
|
||||
|
||||
/* If we did not output a full line, then we are a trailing_line. */
|
||||
trailing_line = (*line_end == '\n') ? NO_PIPE : pipe;
|
||||
|
||||
next:
|
||||
/* Update the head of the buffer remaining to output. */
|
||||
buf += line_len;
|
||||
buflen -= line_len;
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
int main(int argc, char *argv[])
|
||||
{
|
||||
int ret, runtime_status;
|
||||
char cwd[PATH_MAX];
|
||||
char default_pid_file[PATH_MAX];
|
||||
GError *err = NULL;
|
||||
_cleanup_free_ char *contents;
|
||||
int cpid = -1;
|
||||
int status;
|
||||
pid_t pid, create_pid;
|
||||
_cleanup_close_ int logfd = -1;
|
||||
_cleanup_close_ int masterfd_stdout = -1;
|
||||
_cleanup_close_ int masterfd_stderr = -1;
|
||||
_cleanup_close_ int epfd = -1;
|
||||
_cleanup_close_ int csfd = -1;
|
||||
/* Used for !terminal cases. */
|
||||
int slavefd_stdout = -1;
|
||||
int slavefd_stderr = -1;
|
||||
char csname[PATH_MAX] = "/tmp/conmon-term.XXXXXXXX";
|
||||
char buf[BUF_SIZE];
|
||||
int num_read;
|
||||
struct epoll_event ev;
|
||||
struct epoll_event evlist[MAX_EVENTS];
|
||||
int sync_pipe_fd = -1;
|
||||
char *sync_pipe, *endptr;
|
||||
int len;
|
||||
int num_stdio_fds = 0;
|
||||
GError *error = NULL;
|
||||
GOptionContext *context;
|
||||
_cleanup_gstring_ GString *cmd = NULL;
|
||||
|
||||
/* Command line parameters */
|
||||
context = g_option_context_new("- conmon utility");
|
||||
g_option_context_add_main_entries(context, entries, "conmon");
|
||||
if (!g_option_context_parse(context, &argc, &argv, &error)) {
|
||||
g_print("option parsing failed: %s\n", error->message);
|
||||
exit(1);
|
||||
}
|
||||
|
||||
if (cid == NULL)
|
||||
nexit("Container ID not provided. Use --cid");
|
||||
|
||||
if (runtime_path == NULL)
|
||||
nexit("Runtime path not provided. Use --runtime");
|
||||
|
||||
if (bundle_path == NULL && !exec) {
|
||||
if (getcwd(cwd, sizeof(cwd)) == NULL) {
|
||||
nexit("Failed to get working directory");
|
||||
}
|
||||
bundle_path = cwd;
|
||||
}
|
||||
|
||||
if (pid_file == NULL) {
|
||||
if (snprintf(default_pid_file, sizeof(default_pid_file),
|
||||
"%s/pidfile-%s", cwd, cid) < 0) {
|
||||
nexit("Failed to generate the pidfile path");
|
||||
}
|
||||
pid_file = default_pid_file;
|
||||
}
|
||||
|
||||
if (log_path == NULL)
|
||||
nexit("Log file path not provided. Use --log-path");
|
||||
|
||||
/* Environment variables */
|
||||
sync_pipe = getenv("_OCI_SYNCPIPE");
|
||||
if (sync_pipe) {
|
||||
errno = 0;
|
||||
sync_pipe_fd = strtol(sync_pipe, &endptr, 10);
|
||||
if (errno != 0 || *endptr != '\0')
|
||||
pexit("unable to parse _OCI_SYNCPIPE");
|
||||
}
|
||||
|
||||
/* Open the log path file. */
|
||||
logfd = open(log_path, O_WRONLY | O_APPEND | O_CREAT, 0600);
|
||||
if (logfd < 0)
|
||||
pexit("Failed to open log file");
|
||||
|
||||
/*
|
||||
* Set self as subreaper so we can wait for container process
|
||||
* and return its exit code.
|
||||
*/
|
||||
ret = prctl(PR_SET_CHILD_SUBREAPER, 1, 0, 0, 0);
|
||||
if (ret != 0) {
|
||||
pexit("Failed to set as subreaper");
|
||||
}
|
||||
|
||||
if (terminal) {
|
||||
struct sockaddr_un addr = {0};
|
||||
|
||||
/*
|
||||
* Generate a temporary name. Is this unsafe? Probably, but we can
|
||||
* replace it with a rename(2) setup if necessary.
|
||||
*/
|
||||
|
||||
int unusedfd = g_mkstemp(csname);
|
||||
if (unusedfd < 0)
|
||||
pexit("Failed to generate random path for console-socket");
|
||||
close(unusedfd);
|
||||
|
||||
addr.sun_family = AF_UNIX;
|
||||
strncpy(addr.sun_path, csname, sizeof(addr.sun_path)-1);
|
||||
|
||||
ninfo("addr{sun_family=AF_UNIX, sun_path=%s}", addr.sun_path);
|
||||
|
||||
/* Bind to the console socket path. */
|
||||
csfd = socket(AF_UNIX, SOCK_STREAM|SOCK_CLOEXEC, 0);
|
||||
if (csfd < 0)
|
||||
pexit("Failed to create console-socket");
|
||||
/* XXX: This should be handled with a rename(2). */
|
||||
if (unlink(csname) < 0)
|
||||
pexit("Failed to unlink temporary ranom path");
|
||||
if (bind(csfd, (struct sockaddr *) &addr, sizeof(addr)) < 0)
|
||||
pexit("Failed to bind to console-socket");
|
||||
if (listen(csfd, 128) < 0)
|
||||
pexit("Failed to listen on console-socket");
|
||||
} else {
|
||||
int fds[2];
|
||||
|
||||
/*
|
||||
* Create a "fake" master fd so that we can use the same epoll code in
|
||||
* both cases. The slavefd_*s will be closed after we dup over
|
||||
* everything.
|
||||
*
|
||||
* We use pipes here because open(/dev/std{out,err}) will fail if we
|
||||
* used anything else (and it wouldn't be a good idea to create a new
|
||||
* pty pair in the host).
|
||||
*/
|
||||
if (pipe(fds) < 0)
|
||||
pexit("Failed to create !terminal stdout pipe");
|
||||
|
||||
masterfd_stdout = fds[0];
|
||||
slavefd_stdout = fds[1];
|
||||
|
||||
if (pipe(fds) < 0)
|
||||
pexit("Failed to create !terminal stderr pipe");
|
||||
|
||||
masterfd_stderr = fds[0];
|
||||
slavefd_stderr = fds[1];
|
||||
}
|
||||
|
||||
cmd = g_string_new(runtime_path);
|
||||
|
||||
/* Generate the cmdline. */
|
||||
if (!exec && systemd_cgroup)
|
||||
g_string_append_printf(cmd, " --systemd-cgroup");
|
||||
|
||||
if (exec)
|
||||
g_string_append_printf(cmd, " exec -d --pid-file %s", pid_file);
|
||||
else
|
||||
g_string_append_printf(cmd, " create --bundle %s --pid-file %s", bundle_path, pid_file);
|
||||
|
||||
if (terminal)
|
||||
g_string_append_printf(cmd, " --console-socket %s", csname);
|
||||
|
||||
/* Container name comes last. */
|
||||
g_string_append_printf(cmd, " %s", cid);
|
||||
|
||||
/* Set the exec arguments. */
|
||||
if (exec) {
|
||||
/*
|
||||
* FIXME: This code is broken if argv[1] contains spaces or other
|
||||
* similar characters that shells don't like. It's a bit silly
|
||||
* that we're doing things inside a shell at all -- this should
|
||||
* all be done in arrays.
|
||||
*/
|
||||
|
||||
int i;
|
||||
for (i = 1; i < argc; i++)
|
||||
g_string_append_printf(cmd, " %s", argv[i]);
|
||||
}
|
||||
|
||||
/*
|
||||
* We have to fork here because the current runC API dups the stdio of the
|
||||
* calling process over the container's fds. This is actually *very bad*
|
||||
* but is currently being discussed for change in
|
||||
* https://github.com/opencontainers/runtime-spec/pull/513. Hopefully this
|
||||
* won't be the case for very long.
|
||||
*/
|
||||
|
||||
/* Create our container. */
|
||||
create_pid = fork();
|
||||
if (create_pid < 0) {
|
||||
pexit("Failed to fork the create command");
|
||||
} else if (!create_pid) {
|
||||
char *argv[] = {"sh", "-c", cmd->str, NULL};
|
||||
|
||||
/* We only need to touch the stdio if we have terminal=false. */
|
||||
/* FIXME: This results in us not outputting runc error messages to ocid's log. */
|
||||
if (slavefd_stdout >= 0) {
|
||||
if (dup2(slavefd_stdout, STDOUT_FILENO) < 0)
|
||||
pexit("Failed to dup over stdout");
|
||||
}
|
||||
if (slavefd_stderr >= 0) {
|
||||
if (dup2(slavefd_stderr, STDERR_FILENO) < 0)
|
||||
pexit("Failed to dup over stderr");
|
||||
}
|
||||
|
||||
/* Exec into the process. TODO: Don't use the shell. */
|
||||
execv("/bin/sh", argv);
|
||||
exit(127);
|
||||
}
|
||||
|
||||
/* The runtime has that fd now. We don't need to touch it anymore. */
|
||||
close(slavefd_stdout);
|
||||
close(slavefd_stderr);
|
||||
|
||||
/* Get the console fd. */
|
||||
/*
|
||||
* FIXME: If runc fails to start a container, we won't bail because we're
|
||||
* busy waiting for requests. The solution probably involves
|
||||
* epoll(2) and a signalfd(2). This causes a lot of issues.
|
||||
*/
|
||||
if (terminal) {
|
||||
struct file_t console;
|
||||
int connfd = -1;
|
||||
|
||||
ninfo("about to accept from csfd: %d", csfd);
|
||||
connfd = accept4(csfd, NULL, NULL, SOCK_CLOEXEC);
|
||||
if (connfd < 0)
|
||||
pexit("Failed to accept console-socket connection");
|
||||
|
||||
/* Not accepting anything else. */
|
||||
close(csfd);
|
||||
unlink(csname);
|
||||
|
||||
/* We exit if this fails. */
|
||||
ninfo("about to recvfd from connfd: %d", connfd);
|
||||
console = recvfd(connfd);
|
||||
|
||||
ninfo("console = {.name = '%s'; .fd = %d}", console.name, console.fd);
|
||||
free(console.name);
|
||||
|
||||
/* We only have a single fd for both pipes, so we just treat it as
|
||||
* stdout. stderr is ignored. */
|
||||
masterfd_stdout = console.fd;
|
||||
masterfd_stderr = -1;
|
||||
|
||||
/* Clean up everything */
|
||||
close(connfd);
|
||||
}
|
||||
|
||||
ninfo("about to waitpid: %d", create_pid);
|
||||
|
||||
/* Wait for our create child to exit with the return code. */
|
||||
if (waitpid(create_pid, &runtime_status, 0) < 0) {
|
||||
int old_errno = errno;
|
||||
kill(create_pid, SIGKILL);
|
||||
errno = old_errno;
|
||||
pexit("Failed to wait for `runtime %s`", exec ? "exec" : "create");
|
||||
}
|
||||
if (!WIFEXITED(runtime_status) || WEXITSTATUS(runtime_status) != 0)
|
||||
nexit("Failed to create container: exit status %d", WEXITSTATUS(runtime_status));
|
||||
|
||||
/* Read the pid so we can wait for the process to exit */
|
||||
g_file_get_contents(pid_file, &contents, NULL, &err);
|
||||
if (err) {
|
||||
nwarn("Failed to read pidfile: %s", err->message);
|
||||
g_error_free(err);
|
||||
exit(1);
|
||||
}
|
||||
|
||||
cpid = atoi(contents);
|
||||
ninfo("container PID: %d", cpid);
|
||||
|
||||
/* Send the container pid back to parent */
|
||||
if (sync_pipe_fd > 0 && !exec) {
|
||||
len = snprintf(buf, BUF_SIZE, "{\"pid\": %d}\n", cpid);
|
||||
if (len < 0 || write(sync_pipe_fd, buf, len) != len) {
|
||||
pexit("unable to send container pid to parent");
|
||||
}
|
||||
}
|
||||
|
||||
/* Create epoll_ctl so that we can handle read/write events. */
|
||||
/*
|
||||
* TODO: Switch to libuv so that we can also implement exec as well as
|
||||
* attach and other important things. Using epoll directly is just
|
||||
* really nasty.
|
||||
*/
|
||||
epfd = epoll_create(5);
|
||||
if (epfd < 0)
|
||||
pexit("epoll_create");
|
||||
ev.events = EPOLLIN;
|
||||
if (masterfd_stdout >= 0) {
|
||||
ev.data.fd = masterfd_stdout;
|
||||
if (epoll_ctl(epfd, EPOLL_CTL_ADD, ev.data.fd, &ev) < 0)
|
||||
pexit("Failed to add console masterfd_stdout to epoll");
|
||||
num_stdio_fds++;
|
||||
}
|
||||
if (masterfd_stderr >= 0) {
|
||||
ev.data.fd = masterfd_stderr;
|
||||
if (epoll_ctl(epfd, EPOLL_CTL_ADD, ev.data.fd, &ev) < 0)
|
||||
pexit("Failed to add console masterfd_stderr to epoll");
|
||||
num_stdio_fds++;
|
||||
}
|
||||
|
||||
/* Log all of the container's output. */
|
||||
while (num_stdio_fds > 0) {
|
||||
int ready = epoll_wait(epfd, evlist, MAX_EVENTS, -1);
|
||||
if (ready < 0)
|
||||
continue;
|
||||
|
||||
for (int i = 0; i < ready; i++) {
|
||||
if (evlist[i].events & EPOLLIN) {
|
||||
int masterfd = evlist[i].data.fd;
|
||||
stdpipe_t pipe;
|
||||
if (masterfd == masterfd_stdout)
|
||||
pipe = STDOUT_PIPE;
|
||||
else if (masterfd == masterfd_stderr)
|
||||
pipe = STDERR_PIPE;
|
||||
else {
|
||||
nwarn("unknown pipe fd");
|
||||
goto out;
|
||||
}
|
||||
|
||||
num_read = read(masterfd, buf, BUF_SIZE);
|
||||
if (num_read <= 0)
|
||||
goto out;
|
||||
|
||||
if (write_k8s_log(logfd, pipe, buf, num_read) < 0) {
|
||||
nwarn("write_k8s_log failed");
|
||||
goto out;
|
||||
}
|
||||
} else if (evlist[i].events & (EPOLLHUP | EPOLLERR)) {
|
||||
printf("closing fd %d\n", evlist[i].data.fd);
|
||||
if (close(evlist[i].data.fd) < 0)
|
||||
pexit("close");
|
||||
num_stdio_fds--;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
out:
|
||||
/* Wait for the container process and record its exit code */
|
||||
while ((pid = waitpid(-1, &status, 0)) > 0) {
|
||||
int exit_status = WEXITSTATUS(status);
|
||||
|
||||
printf("PID %d exited with status %d\n", pid, exit_status);
|
||||
if (pid == cpid) {
|
||||
if (!exec) {
|
||||
_cleanup_free_ char *status_str = NULL;
|
||||
ret = asprintf(&status_str, "%d", exit_status);
|
||||
if (ret < 0) {
|
||||
pexit("Failed to allocate memory for status");
|
||||
}
|
||||
g_file_set_contents("exit", status_str,
|
||||
strlen(status_str), &err);
|
||||
if (err) {
|
||||
fprintf(stderr,
|
||||
"Failed to write %s to exit file: %s\n",
|
||||
status_str, err->message);
|
||||
g_error_free(err);
|
||||
exit(1);
|
||||
}
|
||||
} else {
|
||||
/* Send the command exec exit code back to the parent */
|
||||
if (sync_pipe_fd > 0) {
|
||||
len = snprintf(buf, BUF_SIZE, "{\"exit_code\": %d}\n", exit_status);
|
||||
if (len < 0 || write(sync_pipe_fd, buf, len) != len) {
|
||||
pexit("unable to send exit status");
|
||||
exit(1);
|
||||
}
|
||||
}
|
||||
}
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
if (exec && pid < 0 && errno == ECHILD && sync_pipe_fd > 0) {
|
||||
/*
|
||||
* waitpid failed and set errno to ECHILD:
|
||||
* The runtime exec call did not create any child
|
||||
* process and we can send the system() exit code
|
||||
* to the parent.
|
||||
*/
|
||||
len = snprintf(buf, BUF_SIZE, "{\"exit_code\": %d}\n", WEXITSTATUS(runtime_status));
|
||||
if (len < 0 || write(sync_pipe_fd, buf, len) != len) {
|
||||
pexit("unable to send exit status");
|
||||
exit(1);
|
||||
}
|
||||
}
|
||||
|
||||
return EXIT_SUCCESS;
|
||||
}
|
2
vendor/github.com/kubernetes-incubator/cri-o/pkg/ocicni/ocicni.go
generated
vendored
2
vendor/github.com/kubernetes-incubator/cri-o/pkg/ocicni/ocicni.go
generated
vendored
@ -7,10 +7,10 @@ import (
|
||||
"sort"
|
||||
"sync"
|
||||
|
||||
"github.com/Sirupsen/logrus"
|
||||
"github.com/containernetworking/cni/libcni"
|
||||
cnitypes "github.com/containernetworking/cni/pkg/types"
|
||||
"github.com/fsnotify/fsnotify"
|
||||
"github.com/sirupsen/logrus"
|
||||
)
|
||||
|
||||
type cniNetworkPlugin struct {
|
||||
|
1
vendor/github.com/opencontainers/go-digest/.mailmap
generated
vendored
1
vendor/github.com/opencontainers/go-digest/.mailmap
generated
vendored
@ -1 +0,0 @@
|
||||
Stephen J Day <stephen.day@docker.com> <stevvooe@users.noreply.github.com>
|
12
vendor/github.com/opencontainers/go-digest/.pullapprove.yml
generated
vendored
12
vendor/github.com/opencontainers/go-digest/.pullapprove.yml
generated
vendored
@ -1,12 +0,0 @@
|
||||
approve_by_comment: true
|
||||
approve_regex: '^(Approved|lgtm|LGTM|:shipit:|:star:|:\+1:|:ship:)'
|
||||
reject_regex: ^Rejected
|
||||
reset_on_push: true
|
||||
author_approval: ignored
|
||||
signed_off_by:
|
||||
required: true
|
||||
reviewers:
|
||||
teams:
|
||||
- go-digest-maintainers
|
||||
name: default
|
||||
required: 2
|
4
vendor/github.com/opencontainers/go-digest/.travis.yml
generated
vendored
4
vendor/github.com/opencontainers/go-digest/.travis.yml
generated
vendored
@ -1,4 +0,0 @@
|
||||
language: go
|
||||
go:
|
||||
- 1.7
|
||||
- master
|
72
vendor/github.com/opencontainers/go-digest/CONTRIBUTING.md
generated
vendored
72
vendor/github.com/opencontainers/go-digest/CONTRIBUTING.md
generated
vendored
@ -1,72 +0,0 @@
|
||||
# Contributing to Docker open source projects
|
||||
|
||||
Want to hack on this project? Awesome! Here are instructions to get you started.
|
||||
|
||||
This project is a part of the [Docker](https://www.docker.com) project, and follows
|
||||
the same rules and principles. If you're already familiar with the way
|
||||
Docker does things, you'll feel right at home.
|
||||
|
||||
Otherwise, go read Docker's
|
||||
[contributions guidelines](https://github.com/docker/docker/blob/master/CONTRIBUTING.md),
|
||||
[issue triaging](https://github.com/docker/docker/blob/master/project/ISSUE-TRIAGE.md),
|
||||
[review process](https://github.com/docker/docker/blob/master/project/REVIEWING.md) and
|
||||
[branches and tags](https://github.com/docker/docker/blob/master/project/BRANCHES-AND-TAGS.md).
|
||||
|
||||
For an in-depth description of our contribution process, visit the
|
||||
contributors guide: [Understand how to contribute](https://docs.docker.com/opensource/workflow/make-a-contribution/)
|
||||
|
||||
### Sign your work
|
||||
|
||||
The sign-off is a simple line at the end of the explanation for the patch. Your
|
||||
signature certifies that you wrote the patch or otherwise have the right to pass
|
||||
it on as an open-source patch. The rules are pretty simple: if you can certify
|
||||
the below (from [developercertificate.org](http://developercertificate.org/)):
|
||||
|
||||
```
|
||||
Developer Certificate of Origin
|
||||
Version 1.1
|
||||
|
||||
Copyright (C) 2004, 2006 The Linux Foundation and its contributors.
|
||||
1 Letterman Drive
|
||||
Suite D4700
|
||||
San Francisco, CA, 94129
|
||||
|
||||
Everyone is permitted to copy and distribute verbatim copies of this
|
||||
license document, but changing it is not allowed.
|
||||
|
||||
|
||||
Developer's Certificate of Origin 1.1
|
||||
|
||||
By making a contribution to this project, I certify that:
|
||||
|
||||
(a) The contribution was created in whole or in part by me and I
|
||||
have the right to submit it under the open source license
|
||||
indicated in the file; or
|
||||
|
||||
(b) The contribution is based upon previous work that, to the best
|
||||
of my knowledge, is covered under an appropriate open source
|
||||
license and I have the right under that license to submit that
|
||||
work with modifications, whether created in whole or in part
|
||||
by me, under the same open source license (unless I am
|
||||
permitted to submit under a different license), as indicated
|
||||
in the file; or
|
||||
|
||||
(c) The contribution was provided directly to me by some other
|
||||
person who certified (a), (b) or (c) and I have not modified
|
||||
it.
|
||||
|
||||
(d) I understand and agree that this project and the contribution
|
||||
are public and that a record of the contribution (including all
|
||||
personal information I submit with it, including my sign-off) is
|
||||
maintained indefinitely and may be redistributed consistent with
|
||||
this project or the open source license(s) involved.
|
||||
```
|
||||
|
||||
Then you just add a line to every git commit message:
|
||||
|
||||
Signed-off-by: Joe Smith <joe.smith@email.com>
|
||||
|
||||
Use your real name (sorry, no pseudonyms or anonymous contributions.)
|
||||
|
||||
If you set your `user.name` and `user.email` git configs, you can sign your
|
||||
commit automatically with `git commit -s`.
|
9
vendor/github.com/opencontainers/go-digest/MAINTAINERS
generated
vendored
9
vendor/github.com/opencontainers/go-digest/MAINTAINERS
generated
vendored
@ -1,9 +0,0 @@
|
||||
Aaron Lehmann <aaron.lehmann@docker.com> (@aaronlehmann)
|
||||
Brandon Philips <brandon.philips@coreos.com> (@philips)
|
||||
Brendan Burns <bburns@microsoft.com> (@brendandburns)
|
||||
Derek McGowan <derek@mcgstyle.net> (@dmcgowan)
|
||||
Jason Bouzane <jbouzane@google.com> (@jbouzane)
|
||||
John Starks <jostarks@microsoft.com> (@jstarks)
|
||||
Jonathan Boulle <jon.boulle@coreos.com> (@jonboulle)
|
||||
Stephen Day <stephen.day@docker.com> (@stevvooe)
|
||||
Vincent Batts <vbatts@redhat.com> (@vbatts)
|
167
vendor/github.com/opencontainers/image-spec/README.md
generated
vendored
Normal file
167
vendor/github.com/opencontainers/image-spec/README.md
generated
vendored
Normal file
@ -0,0 +1,167 @@
|
||||
# OCI Image Format Specification
|
||||
<div>
|
||||
<a href="https://travis-ci.org/opencontainers/image-spec">
|
||||
<img src="https://travis-ci.org/opencontainers/image-spec.svg?branch=master"></img>
|
||||
</a>
|
||||
</div>
|
||||
|
||||
The OCI Image Format project creates and maintains the software shipping container image format spec (OCI Image Format).
|
||||
|
||||
**[The specification can be found here](spec.md).**
|
||||
|
||||
This repository also provides [Go types](specs-go), [intra-blob validation tooling, and JSON Schema](schema).
|
||||
The Go types and validation should be compatible with the current Go release; earlier Go releases are not supported.
|
||||
|
||||
Additional documentation about how this group operates:
|
||||
|
||||
- [Code of Conduct](https://github.com/opencontainers/tob/blob/d2f9d68c1332870e40693fe077d311e0742bc73d/code-of-conduct.md)
|
||||
- [Roadmap](#roadmap)
|
||||
- [Releases](RELEASES.md)
|
||||
- [Project Documentation](project.md)
|
||||
|
||||
The _optional_ and _base_ layers of all OCI projects are tracked in the [OCI Scope Table](https://www.opencontainers.org/about/oci-scope-table).
|
||||
|
||||
## Running an OCI Image
|
||||
|
||||
The OCI Image Format partner project is the [OCI Runtime Spec project](https://github.com/opencontainers/runtime-spec).
|
||||
The Runtime Specification outlines how to run a "[filesystem bundle](https://github.com/opencontainers/runtime-spec/blob/master/bundle.md)" that is unpacked on disk.
|
||||
At a high-level an OCI implementation would download an OCI Image then unpack that image into an OCI Runtime filesystem bundle.
|
||||
At this point the OCI Runtime Bundle would be run by an OCI Runtime.
|
||||
|
||||
This entire workflow supports the UX that users have come to expect from container engines like Docker and rkt: primarily, the ability to run an image with no additional arguments:
|
||||
|
||||
* docker run example.com/org/app:v1.0.0
|
||||
* rkt run example.com/org/app,version=v1.0.0
|
||||
|
||||
To support this UX the OCI Image Format contains sufficient information to launch the application on the target platform (e.g. command, arguments, environment variables, etc).
|
||||
|
||||
## FAQ
|
||||
|
||||
**Q: Why doesn't this project mention distribution?**
|
||||
|
||||
A: Distribution, for example using HTTP as both Docker v2.2 and AppC do today, is currently out of scope on the [OCI Scope Table](https://www.opencontainers.org/about/oci-scope-table).
|
||||
There has been [some discussion on the TOB mailing list](https://groups.google.com/a/opencontainers.org/d/msg/tob/A3JnmI-D-6Y/tLuptPDHAgAJ) to make distribution an optional layer, but this topic is a work in progress.
|
||||
|
||||
**Q: What happens to AppC or Docker Image Formats?**
|
||||
|
||||
A: Existing formats can continue to be a proving ground for technologies, as needed.
|
||||
The OCI Image Format project strives to provide a dependable open specification that can be shared between different tools and be evolved for years or decades of compatibility, as the deb and rpm formats have.
|
||||
|
||||
Find more [FAQ on the OCI site](https://www.opencontainers.org/faq).
|
||||
|
||||
## Roadmap
|
||||
|
||||
The [GitHub milestones](https://github.com/opencontainers/image-spec/milestones) lay out the path to the OCI v1.0.0 release in late 2016.
|
||||
|
||||
# Contributing
|
||||
|
||||
Development happens on GitHub for the spec.
|
||||
Issues are used for bugs and actionable items and longer discussions can happen on the [mailing list](#mailing-list).
|
||||
|
||||
The specification and code is licensed under the Apache 2.0 license found in the `LICENSE` file of this repository.
|
||||
|
||||
## Discuss your design
|
||||
|
||||
The project welcomes submissions, but please let everyone know what you are working on.
|
||||
|
||||
Before undertaking a nontrivial change to this specification, send mail to the [mailing list](#mailing-list) to discuss what you plan to do.
|
||||
This gives everyone a chance to validate the design, helps prevent duplication of effort, and ensures that the idea fits.
|
||||
It also guarantees that the design is sound before code is written; a GitHub pull-request is not the place for high-level discussions.
|
||||
|
||||
Typos and grammatical errors can go straight to a pull-request.
|
||||
When in doubt, start on the [mailing-list](#mailing-list).
|
||||
|
||||
## Weekly Call
|
||||
|
||||
The contributors and maintainers of all OCI projects have a weekly meeting Wednesdays at 2:00 PM (USA Pacific).
|
||||
Everyone is welcome to participate via [UberConference web][UberConference] or audio-only: +1-415-968-0849 (no PIN needed).
|
||||
An initial agenda will be posted to the [mailing list](#mailing-list) earlier in the week, and everyone is welcome to propose additional topics or suggest other agenda alterations there.
|
||||
Minutes are posted to the [mailing list](#mailing-list) and minutes from past calls are archived [here][minutes].
|
||||
|
||||
## Mailing List
|
||||
|
||||
You can subscribe and join the mailing list on [Google Groups](https://groups.google.com/a/opencontainers.org/forum/#!forum/dev).
|
||||
|
||||
## IRC
|
||||
|
||||
OCI discussion happens on #opencontainers on Freenode ([logs][irc-logs]).
|
||||
|
||||
## Markdown style
|
||||
|
||||
To keep consistency throughout the Markdown files in the Open Container spec, all files should be formatted with one sentence per line.
|
||||
This fixes two things: it makes diffing easier with git and it resolves fights about line wrapping length.
|
||||
For example, this paragraph will span three lines in the Markdown source.
|
||||
|
||||
## Git commit
|
||||
|
||||
### Sign your work
|
||||
|
||||
The sign-off is a simple line at the end of the explanation for the patch, which certifies that you wrote it or otherwise have the right to pass it on as an open-source patch.
|
||||
The rules are pretty simple: if you can certify the below (from [developercertificate.org](http://developercertificate.org/)):
|
||||
|
||||
```
|
||||
Developer Certificate of Origin
|
||||
Version 1.1
|
||||
|
||||
Copyright (C) 2004, 2006 The Linux Foundation and its contributors.
|
||||
660 York Street, Suite 102,
|
||||
San Francisco, CA 94110 USA
|
||||
|
||||
Everyone is permitted to copy and distribute verbatim copies of this
|
||||
license document, but changing it is not allowed.
|
||||
|
||||
|
||||
Developer's Certificate of Origin 1.1
|
||||
|
||||
By making a contribution to this project, I certify that:
|
||||
|
||||
(a) The contribution was created in whole or in part by me and I
|
||||
have the right to submit it under the open source license
|
||||
indicated in the file; or
|
||||
|
||||
(b) The contribution is based upon previous work that, to the best
|
||||
of my knowledge, is covered under an appropriate open source
|
||||
license and I have the right under that license to submit that
|
||||
work with modifications, whether created in whole or in part
|
||||
by me, under the same open source license (unless I am
|
||||
permitted to submit under a different license), as indicated
|
||||
in the file; or
|
||||
|
||||
(c) The contribution was provided directly to me by some other
|
||||
person who certified (a), (b) or (c) and I have not modified
|
||||
it.
|
||||
|
||||
(d) I understand and agree that this project and the contribution
|
||||
are public and that a record of the contribution (including all
|
||||
personal information I submit with it, including my sign-off) is
|
||||
maintained indefinitely and may be redistributed consistent with
|
||||
this project or the open source license(s) involved.
|
||||
```
|
||||
|
||||
then you just add a line to every git commit message:
|
||||
|
||||
Signed-off-by: Joe Smith <joe@gmail.com>
|
||||
|
||||
using your real name (sorry, no pseudonyms or anonymous contributions).
|
||||
|
||||
You can add the sign off when creating the git commit via `git commit -s`.
|
||||
|
||||
### Commit Style
|
||||
|
||||
Simple house-keeping for clean git history.
|
||||
Read more on [How to Write a Git Commit Message](http://chris.beams.io/posts/git-commit/) or the Discussion section of [`git-commit(1)`](http://git-scm.com/docs/git-commit).
|
||||
|
||||
1. Separate the subject from body with a blank line
|
||||
2. Limit the subject line to 50 characters
|
||||
3. Capitalize the subject line
|
||||
4. Do not end the subject line with a period
|
||||
5. Use the imperative mood in the subject line
|
||||
6. Wrap the body at 72 characters
|
||||
7. Use the body to explain what and why vs. how
|
||||
* If there was important/useful/essential conversation or information, copy or include a reference
|
||||
8. When possible, one keyword to scope the change in the subject (e.g. "README: ...", "runtime: ...")
|
||||
|
||||
|
||||
[UberConference]: https://www.uberconference.com/opencontainers
|
||||
[irc-logs]: http://ircbot.wl.linuxfoundation.org/eavesdrop/%23opencontainers/
|
||||
[minutes]: http://ircbot.wl.linuxfoundation.org/meetings/opencontainers/
|
244
vendor/github.com/opencontainers/runc/README.md
generated
vendored
Normal file
@ -0,0 +1,244 @@
|
||||
# runc
|
||||
|
||||
[Build Status](https://travis-ci.org/opencontainers/runc)
|
||||
[Go Report Card](https://goreportcard.com/report/github.com/opencontainers/runc)
|
||||
[GoDoc](https://godoc.org/github.com/opencontainers/runc)
|
||||
|
||||
## Introduction
|
||||
|
||||
`runc` is a CLI tool for spawning and running containers according to the OCI specification.
|
||||
|
||||
## Releases
|
||||
|
||||
`runc` depends on and tracks the [runtime-spec](https://github.com/opencontainers/runtime-spec) repository.
|
||||
We will try to make sure that `runc` and the OCI specification major versions stay in lockstep.
|
||||
This means that `runc` 1.0.0 should implement the 1.0 version of the specification.
|
||||
|
||||
You can find official releases of `runc` on the [release](https://github.com/opencontainers/runc/releases) page.
|
||||
|
||||
### Security
|
||||
|
||||
If you wish to report a security issue, please disclose the issue responsibly
|
||||
to security@opencontainers.org.
|
||||
|
||||
## Building
|
||||
|
||||
`runc` currently supports the Linux platform on a variety of architectures.
|
||||
It must be built with Go version 1.6 or higher in order for some features to function properly.
|
||||
|
||||
In order to enable seccomp support you will need to install `libseccomp` on your platform.
|
||||
> e.g. `libseccomp-devel` for CentOS, or `libseccomp-dev` for Ubuntu
|
||||
|
||||
Otherwise, if you do not want to build `runc` with seccomp support you can add `BUILDTAGS=""` when running make.
|
||||
|
||||
```bash
|
||||
# create a 'github.com/opencontainers' directory in your GOPATH/src
|
||||
cd github.com/opencontainers
|
||||
git clone https://github.com/opencontainers/runc
|
||||
cd runc
|
||||
|
||||
make
|
||||
sudo make install
|
||||
```
|
||||
|
||||
`runc` will be installed to `/usr/local/sbin/runc` on your system.
|
||||
|
||||
#### Build Tags
|
||||
|
||||
`runc` supports optional build tags for compiling support of various features.
|
||||
To add build tags to the make option the `BUILDTAGS` variable must be set.
|
||||
|
||||
```bash
|
||||
make BUILDTAGS='seccomp apparmor'
|
||||
```
|
||||
|
||||
| Build Tag | Feature | Dependency |
|
||||
|-----------|------------------------------------|-------------|
|
||||
| seccomp | Syscall filtering | libseccomp |
|
||||
| selinux | selinux process and mount labeling | <none> |
|
||||
| apparmor | apparmor profile support | libapparmor |
|
||||
| ambient | ambient capability support | kernel 4.3 |
|
||||
|
||||
|
||||
### Running the test suite
|
||||
|
||||
`runc` currently supports running its test suite via Docker.
|
||||
To run the suite just type `make test`.
|
||||
|
||||
```bash
|
||||
make test
|
||||
```
|
||||
|
||||
There are additional make targets for running the tests outside of a container but this is not recommended as the tests are written with the expectation that they can write and remove anywhere.
|
||||
|
||||
You can run a specific test case by setting the `TESTFLAGS` variable.
|
||||
|
||||
```bash
|
||||
# make test TESTFLAGS="-run=SomeTestFunction"
|
||||
```
|
||||
|
||||
### Dependencies Management
|
||||
|
||||
`runc` uses [vndr](https://github.com/LK4D4/vndr) for dependency management.
|
||||
Please refer to [vndr](https://github.com/LK4D4/vndr) for how to add or update
|
||||
dependencies.
|
||||
|
||||
## Using runc
|
||||
|
||||
### Creating an OCI Bundle
|
||||
|
||||
In order to use runc you must have your container in the format of an OCI bundle.
|
||||
If you have Docker installed you can use its `export` method to acquire a root filesystem from an existing Docker container.
|
||||
|
||||
```bash
|
||||
# create the top most bundle directory
|
||||
mkdir /mycontainer
|
||||
cd /mycontainer
|
||||
|
||||
# create the rootfs directory
|
||||
mkdir rootfs
|
||||
|
||||
# export busybox via Docker into the rootfs directory
|
||||
docker export $(docker create busybox) | tar -C rootfs -xvf -
|
||||
```
|
||||
|
||||
After a root filesystem is populated you just generate a spec in the format of a `config.json` file inside your bundle.
|
||||
`runc` provides a `spec` command to generate a base template spec that you are then able to edit.
|
||||
To find features and documentation for fields in the spec please refer to the [specs](https://github.com/opencontainers/runtime-spec) repository.
|
||||
|
||||
```bash
|
||||
runc spec
|
||||
```
|
||||
|
||||
### Running Containers
|
||||
|
||||
Assuming you have an OCI bundle from the previous step you can execute the container in two different ways.
|
||||
|
||||
The first way is to use the convenience command `run` that will handle creating, starting, and deleting the container after it exits.
|
||||
|
||||
```bash
|
||||
# run as root
|
||||
cd /mycontainer
|
||||
runc run mycontainerid
|
||||
```
|
||||
|
||||
If you used the unmodified `runc spec` template this should give you a `sh` session inside the container.
|
||||
|
||||
The second way to start a container is using the specs lifecycle operations.
|
||||
This gives you more power over how the container is created and managed while it is running.
|
||||
This will also launch the container in the background so you will have to edit the `config.json` to remove the `terminal` setting for the simple examples here.
|
||||
Your process field in the `config.json` should look like the example below, with `"terminal": false` and `"args": ["sleep", "5"]`.
|
||||
|
||||
|
||||
```json
|
||||
"process": {
|
||||
"terminal": false,
|
||||
"user": {
|
||||
"uid": 0,
|
||||
"gid": 0
|
||||
},
|
||||
"args": [
|
||||
"sleep", "5"
|
||||
],
|
||||
"env": [
|
||||
"PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin",
|
||||
"TERM=xterm"
|
||||
],
|
||||
"cwd": "/",
|
||||
"capabilities": {
|
||||
"bounding": [
|
||||
"CAP_AUDIT_WRITE",
|
||||
"CAP_KILL",
|
||||
"CAP_NET_BIND_SERVICE"
|
||||
],
|
||||
"effective": [
|
||||
"CAP_AUDIT_WRITE",
|
||||
"CAP_KILL",
|
||||
"CAP_NET_BIND_SERVICE"
|
||||
],
|
||||
"inheritable": [
|
||||
"CAP_AUDIT_WRITE",
|
||||
"CAP_KILL",
|
||||
"CAP_NET_BIND_SERVICE"
|
||||
],
|
||||
"permitted": [
|
||||
"CAP_AUDIT_WRITE",
|
||||
"CAP_KILL",
|
||||
"CAP_NET_BIND_SERVICE"
|
||||
],
|
||||
"ambient": [
|
||||
"CAP_AUDIT_WRITE",
|
||||
"CAP_KILL",
|
||||
"CAP_NET_BIND_SERVICE"
|
||||
]
|
||||
},
|
||||
"rlimits": [
|
||||
{
|
||||
"type": "RLIMIT_NOFILE",
|
||||
"hard": 1024,
|
||||
"soft": 1024
|
||||
}
|
||||
],
|
||||
"noNewPrivileges": true
|
||||
},
|
||||
```
|
||||
|
||||
Now we can go through the lifecycle operations in your shell.
|
||||
|
||||
|
||||
```bash
|
||||
# run as root
|
||||
cd /mycontainer
|
||||
runc create mycontainerid
|
||||
|
||||
# view the container is created and in the "created" state
|
||||
runc list
|
||||
|
||||
# start the process inside the container
|
||||
runc start mycontainerid
|
||||
|
||||
# after 5 seconds view that the container has exited and is now in the stopped state
|
||||
runc list
|
||||
|
||||
# now delete the container
|
||||
runc delete mycontainerid
|
||||
```
|
||||
|
||||
This adds more complexity but allows higher-level systems to manage runc and provides points in the container's creation to set up various settings after the container has been created and/or before it is deleted.
|
||||
This is commonly used to set up the container's network stack after `create` but before `start`, where the user's defined process will be running.
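
The sketch below is not from the runc documentation; it only illustrates how a higher-level system might drive this create/configure/start split from Go, where `setupNetwork` is a hypothetical placeholder for whatever configuration happens between the two steps.

```go
package main

import (
	"log"
	"os/exec"
)

// runc runs a single runc subcommand from the bundle directory and fails loudly.
func runc(args ...string) {
	cmd := exec.Command("runc", args...)
	cmd.Dir = "/mycontainer" // the bundle directory from the earlier steps
	if out, err := cmd.CombinedOutput(); err != nil {
		log.Fatalf("runc %v failed: %v\n%s", args, err, out)
	}
}

// setupNetwork is a hypothetical placeholder; a real supervisor might create
// a veth pair here and move one end into the container's network namespace.
func setupNetwork(id string) error { return nil }

func main() {
	const id = "mycontainerid"

	runc("create", id) // the container now exists in the "created" state
	if err := setupNetwork(id); err != nil {
		log.Fatal(err)
	}
	runc("start", id) // the user-defined process starts running
}
```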
|
||||
|
||||
#### Rootless containers
|
||||
`runc` has the ability to run containers without root privileges. This is called `rootless`. You need to pass some parameters to `runc` in order to run rootless containers. See below and compare with the previous example. Run the following commands as an ordinary user:
|
||||
```bash
|
||||
# Same as the first example
|
||||
mkdir ~/mycontainer
|
||||
cd ~/mycontainer
|
||||
mkdir rootfs
|
||||
docker export $(docker create busybox) | tar -C rootfs -xvf -
|
||||
|
||||
# The --rootless parameter instructs runc spec to generate a configuration for a rootless container, which will allow you to run the container as a non-root user.
|
||||
runc spec --rootless
|
||||
|
||||
# The --root parameter tells runc where to store the container state. It must be writable by the user.
|
||||
runc --root /tmp/runc run mycontainerid
|
||||
```
|
||||
|
||||
#### Supervisors
|
||||
|
||||
`runc` can be used with process supervisors and init systems to ensure that containers are restarted when they exit.
|
||||
An example systemd unit file looks something like this.
|
||||
|
||||
```systemd
|
||||
[Unit]
|
||||
Description=Start My Container
|
||||
|
||||
[Service]
|
||||
Type=forking
|
||||
ExecStart=/usr/local/sbin/runc run -d --pid-file /run/mycontainerid.pid mycontainerid
|
||||
ExecStopPost=/usr/local/sbin/runc delete mycontainerid
|
||||
WorkingDirectory=/mycontainer
|
||||
PIDFile=/run/mycontainerid.pid
|
||||
|
||||
[Install]
|
||||
WantedBy=multi-user.target
|
||||
```
|
328
vendor/github.com/opencontainers/runc/libcontainer/README.md
generated
vendored
Normal file
@ -0,0 +1,328 @@
|
||||
# libcontainer
|
||||
|
||||
[GoDoc](https://godoc.org/github.com/opencontainers/runc/libcontainer)
|
||||
|
||||
Libcontainer provides a native Go implementation for creating containers
|
||||
with namespaces, cgroups, capabilities, and filesystem access controls.
|
||||
It allows you to manage the lifecycle of the container, performing additional operations
|
||||
after the container is created.
|
||||
|
||||
|
||||
#### Container
|
||||
A container is a self-contained execution environment that shares the kernel of the
|
||||
host system and which is (optionally) isolated from other containers in the system.
|
||||
|
||||
#### Using libcontainer
|
||||
|
||||
Because containers are spawned in a two-step process you will need a binary that
|
||||
will be executed as the init process for the container. In libcontainer, we use
|
||||
the current binary (/proc/self/exe) as the init process, invoked with the
|
||||
arg "init", we call the first step process "bootstrap", so you always need a "init"
|
||||
function as the entry point of the bootstrap stage.
|
||||
|
||||
In addition to the Go init function, the early-stage bootstrap is handled by importing
|
||||
[nsenter](https://github.com/opencontainers/runc/blob/master/libcontainer/nsenter/README.md).
|
||||
|
||||
```go
|
||||
import (
|
||||
_ "github.com/opencontainers/runc/libcontainer/nsenter"
|
||||
)
|
||||
|
||||
func init() {
|
||||
if len(os.Args) > 1 && os.Args[1] == "init" {
|
||||
runtime.GOMAXPROCS(1)
|
||||
runtime.LockOSThread()
|
||||
factory, _ := libcontainer.New("")
|
||||
if err := factory.StartInitialization(); err != nil {
|
||||
logrus.Fatal(err)
|
||||
}
|
||||
panic("--this line should have never been executed, congratulations--")
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
Then to create a container you first have to initialize an instance of a factory
|
||||
that will handle the creation and initialization for a container.
|
||||
|
||||
```go
|
||||
factory, err := libcontainer.New("/var/lib/container", libcontainer.Cgroupfs, libcontainer.InitArgs(os.Args[0], "init"))
|
||||
if err != nil {
|
||||
logrus.Fatal(err)
|
||||
return
|
||||
}
|
||||
```
|
||||
|
||||
Once you have an instance of the factory created we can create a configuration
|
||||
struct describing how the container is to be created. A sample would look similar to this:
|
||||
|
||||
```go
|
||||
defaultMountFlags := unix.MS_NOEXEC | unix.MS_NOSUID | unix.MS_NODEV
|
||||
config := &configs.Config{
|
||||
Rootfs: "/your/path/to/rootfs",
|
||||
Capabilities: &configs.Capabilities{
|
||||
Bounding: []string{
|
||||
"CAP_CHOWN",
|
||||
"CAP_DAC_OVERRIDE",
|
||||
"CAP_FSETID",
|
||||
"CAP_FOWNER",
|
||||
"CAP_MKNOD",
|
||||
"CAP_NET_RAW",
|
||||
"CAP_SETGID",
|
||||
"CAP_SETUID",
|
||||
"CAP_SETFCAP",
|
||||
"CAP_SETPCAP",
|
||||
"CAP_NET_BIND_SERVICE",
|
||||
"CAP_SYS_CHROOT",
|
||||
"CAP_KILL",
|
||||
"CAP_AUDIT_WRITE",
|
||||
},
|
||||
Effective: []string{
|
||||
"CAP_CHOWN",
|
||||
"CAP_DAC_OVERRIDE",
|
||||
"CAP_FSETID",
|
||||
"CAP_FOWNER",
|
||||
"CAP_MKNOD",
|
||||
"CAP_NET_RAW",
|
||||
"CAP_SETGID",
|
||||
"CAP_SETUID",
|
||||
"CAP_SETFCAP",
|
||||
"CAP_SETPCAP",
|
||||
"CAP_NET_BIND_SERVICE",
|
||||
"CAP_SYS_CHROOT",
|
||||
"CAP_KILL",
|
||||
"CAP_AUDIT_WRITE",
|
||||
},
|
||||
Inheritable: []string{
|
||||
"CAP_CHOWN",
|
||||
"CAP_DAC_OVERRIDE",
|
||||
"CAP_FSETID",
|
||||
"CAP_FOWNER",
|
||||
"CAP_MKNOD",
|
||||
"CAP_NET_RAW",
|
||||
"CAP_SETGID",
|
||||
"CAP_SETUID",
|
||||
"CAP_SETFCAP",
|
||||
"CAP_SETPCAP",
|
||||
"CAP_NET_BIND_SERVICE",
|
||||
"CAP_SYS_CHROOT",
|
||||
"CAP_KILL",
|
||||
"CAP_AUDIT_WRITE",
|
||||
},
|
||||
Permitted: []string{
|
||||
"CAP_CHOWN",
|
||||
"CAP_DAC_OVERRIDE",
|
||||
"CAP_FSETID",
|
||||
"CAP_FOWNER",
|
||||
"CAP_MKNOD",
|
||||
"CAP_NET_RAW",
|
||||
"CAP_SETGID",
|
||||
"CAP_SETUID",
|
||||
"CAP_SETFCAP",
|
||||
"CAP_SETPCAP",
|
||||
"CAP_NET_BIND_SERVICE",
|
||||
"CAP_SYS_CHROOT",
|
||||
"CAP_KILL",
|
||||
"CAP_AUDIT_WRITE",
|
||||
},
|
||||
Ambient: []string{
|
||||
"CAP_CHOWN",
|
||||
"CAP_DAC_OVERRIDE",
|
||||
"CAP_FSETID",
|
||||
"CAP_FOWNER",
|
||||
"CAP_MKNOD",
|
||||
"CAP_NET_RAW",
|
||||
"CAP_SETGID",
|
||||
"CAP_SETUID",
|
||||
"CAP_SETFCAP",
|
||||
"CAP_SETPCAP",
|
||||
"CAP_NET_BIND_SERVICE",
|
||||
"CAP_SYS_CHROOT",
|
||||
"CAP_KILL",
|
||||
"CAP_AUDIT_WRITE",
|
||||
},
|
||||
},
|
||||
Namespaces: configs.Namespaces([]configs.Namespace{
|
||||
{Type: configs.NEWNS},
|
||||
{Type: configs.NEWUTS},
|
||||
{Type: configs.NEWIPC},
|
||||
{Type: configs.NEWPID},
|
||||
{Type: configs.NEWUSER},
|
||||
{Type: configs.NEWNET},
|
||||
}),
|
||||
Cgroups: &configs.Cgroup{
|
||||
Name: "test-container",
|
||||
Parent: "system",
|
||||
Resources: &configs.Resources{
|
||||
MemorySwappiness: nil,
|
||||
AllowAllDevices: nil,
|
||||
AllowedDevices: configs.DefaultAllowedDevices,
|
||||
},
|
||||
},
|
||||
MaskPaths: []string{
|
||||
"/proc/kcore",
|
||||
"/sys/firmware",
|
||||
},
|
||||
ReadonlyPaths: []string{
|
||||
"/proc/sys", "/proc/sysrq-trigger", "/proc/irq", "/proc/bus",
|
||||
},
|
||||
Devices: configs.DefaultAutoCreatedDevices,
|
||||
Hostname: "testing",
|
||||
Mounts: []*configs.Mount{
|
||||
{
|
||||
Source: "proc",
|
||||
Destination: "/proc",
|
||||
Device: "proc",
|
||||
Flags: defaultMountFlags,
|
||||
},
|
||||
{
|
||||
Source: "tmpfs",
|
||||
Destination: "/dev",
|
||||
Device: "tmpfs",
|
||||
Flags: unix.MS_NOSUID | unix.MS_STRICTATIME,
|
||||
Data: "mode=755",
|
||||
},
|
||||
{
|
||||
Source: "devpts",
|
||||
Destination: "/dev/pts",
|
||||
Device: "devpts",
|
||||
Flags: unix.MS_NOSUID | unix.MS_NOEXEC,
|
||||
Data: "newinstance,ptmxmode=0666,mode=0620,gid=5",
|
||||
},
|
||||
{
|
||||
Device: "tmpfs",
|
||||
Source: "shm",
|
||||
Destination: "/dev/shm",
|
||||
Data: "mode=1777,size=65536k",
|
||||
Flags: defaultMountFlags,
|
||||
},
|
||||
{
|
||||
Source: "mqueue",
|
||||
Destination: "/dev/mqueue",
|
||||
Device: "mqueue",
|
||||
Flags: defaultMountFlags,
|
||||
},
|
||||
{
|
||||
Source: "sysfs",
|
||||
Destination: "/sys",
|
||||
Device: "sysfs",
|
||||
Flags: defaultMountFlags | unix.MS_RDONLY,
|
||||
},
|
||||
},
|
||||
UidMappings: []configs.IDMap{
|
||||
{
|
||||
ContainerID: 0,
|
||||
HostID: 1000,
|
||||
Size: 65536,
|
||||
},
|
||||
},
|
||||
GidMappings: []configs.IDMap{
|
||||
{
|
||||
ContainerID: 0,
|
||||
HostID: 1000,
|
||||
Size: 65536,
|
||||
},
|
||||
},
|
||||
Networks: []*configs.Network{
|
||||
{
|
||||
Type: "loopback",
|
||||
Address: "127.0.0.1/0",
|
||||
Gateway: "localhost",
|
||||
},
|
||||
},
|
||||
Rlimits: []configs.Rlimit{
|
||||
{
|
||||
Type: unix.RLIMIT_NOFILE,
|
||||
Hard: uint64(1025),
|
||||
Soft: uint64(1025),
|
||||
},
|
||||
},
|
||||
}
|
||||
```
|
||||
|
||||
Once you have the configuration populated you can create a container:
|
||||
|
||||
```go
|
||||
container, err := factory.Create("container-id", config)
|
||||
if err != nil {
|
||||
logrus.Fatal(err)
|
||||
return
|
||||
}
|
||||
```
|
||||
|
||||
To spawn bash as the initial process inside the container and have the
|
||||
processes pid returned in order to wait, signal, or kill the process:
|
||||
|
||||
```go
|
||||
process := &libcontainer.Process{
|
||||
Args: []string{"/bin/bash"},
|
||||
Env: []string{"PATH=/bin"},
|
||||
User: "daemon",
|
||||
Stdin: os.Stdin,
|
||||
Stdout: os.Stdout,
|
||||
Stderr: os.Stderr,
|
||||
}
|
||||
|
||||
err := container.Run(process)
|
||||
if err != nil {
|
||||
container.Destroy()
|
||||
logrus.Fatal(err)
|
||||
return
|
||||
}
|
||||
|
||||
// wait for the process to finish.
|
||||
_, err := process.Wait()
|
||||
if err != nil {
|
||||
logrus.Fatal(err)
|
||||
}
|
||||
|
||||
// destroy the container.
|
||||
container.Destroy()
|
||||
```
|
||||
|
||||
Additional ways to interact with a running container are:
|
||||
|
||||
```go
|
||||
// return all the pids for all processes running inside the container.
|
||||
processes, err := container.Processes()
|
||||
|
||||
// get detailed cpu, memory, io, and network statistics for the container and
|
||||
// its processes.
|
||||
stats, err := container.Stats()
|
||||
|
||||
// pause all processes inside the container.
|
||||
container.Pause()
|
||||
|
||||
// resume all paused processes.
|
||||
container.Resume()
|
||||
|
||||
// send signal to container's init process.
|
||||
container.Signal(signal)
|
||||
|
||||
// update container resource constraints.
|
||||
container.Set(config)
|
||||
|
||||
// get current status of the container.
|
||||
status, err := container.Status()
|
||||
|
||||
// get current container's state information.
|
||||
state, err := container.State()
|
||||
```
|
||||
|
||||
|
||||
#### Checkpoint & Restore
|
||||
|
||||
libcontainer now integrates [CRIU](http://criu.org/) for checkpointing and restoring containers.
|
||||
This lets you save the state of a process running inside a container to disk, and then restore
|
||||
that state into a new process, on the same machine or on another machine.
|
||||
|
||||
`criu` version 1.5.2 or higher is required to use checkpoint and restore.
|
||||
If you don't already have `criu` installed, you can build it from source, following the
|
||||
[online instructions](http://criu.org/Installation). `criu` is also installed in the docker image
|
||||
generated when building libcontainer with docker.
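
As a rough sketch (not taken from these docs), checkpointing and restoring through libcontainer looks roughly like the snippet below; the `CriuOpts` fields shown are assumptions and should be checked against the libcontainer version you vendor.

```go
// Assumes "container" and "process" were created as in the earlier examples
// and that a recent criu binary is installed on the host.
criuOpts := &libcontainer.CriuOpts{
	ImagesDirectory: "/tmp/criu-images", // where the dump is written
	WorkDirectory:   "/tmp/criu-work",   // scratch space and CRIU logs
	LeaveRunning:    false,              // stop the container after the dump
}

// save the state of the running container to disk.
if err := container.Checkpoint(criuOpts); err != nil {
	logrus.Fatal(err)
}

// later, restore that state into a new process (possibly on another machine
// that has access to the same images directory).
if err := container.Restore(process, criuOpts); err != nil {
	logrus.Fatal(err)
}
```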
|
||||
|
||||
|
||||
## Copyright and license
|
||||
|
||||
Code and documentation copyright 2014 Docker, Inc. Code released under the Apache 2.0 license.
|
||||
Docs released under Creative Commons.
|
||||
|
44
vendor/github.com/opencontainers/runc/libcontainer/nsenter/README.md
generated
vendored
Normal file
@ -0,0 +1,44 @@
|
||||
## nsenter
|
||||
|
||||
The `nsenter` package registers a special init constructor that is called before
|
||||
the Go runtime has a chance to boot. This provides us the ability to `setns` on
|
||||
existing namespaces and avoid the issues that the Go runtime has with multiple
|
||||
threads. This constructor will be called whenever this package is
|
||||
imported into your Go application.
|
||||
|
||||
The `nsenter` package uses `import "C"` and the [cgo](https://golang.org/cmd/cgo/)
|
||||
package. In cgo, if the import of "C" is immediately preceded by a comment, that comment,
|
||||
called the preamble, is used as a header when compiling the C parts of the package.
|
||||
So every time we import package `nsenter`, the C function `nsexec()` is
|
||||
called. Package `nsenter` is currently only imported in `main_unix.go`, so every time
|
||||
before we call `cmd.Start` on Linux, that C code runs.
|
||||
|
||||
Because `nsexec()` must be run before the Go runtime in order to use the
|
||||
Linux kernel namespace, you must `import` this library into a package if
|
||||
you plan to use `libcontainer` directly. Otherwise Go will not execute
|
||||
the `nsexec()` constructor, which means that the re-exec will not cause
|
||||
the namespaces to be joined. You can import it like this:
|
||||
|
||||
```go
|
||||
import _ "github.com/opencontainers/runc/libcontainer/nsenter"
|
||||
```
|
||||
|
||||
`nsexec()` will first get the file descriptor number for the init pipe
|
||||
from the environment variable `_LIBCONTAINER_INITPIPE` (which was opened
|
||||
by the parent and kept open across the fork-exec of the `nsexec()` init
|
||||
process). The init pipe is used to read bootstrap data (namespace paths,
|
||||
clone flags, uid and gid mappings, and the console path) from the parent
|
||||
process. `nsexec()` will then call `setns(2)` to join the namespaces
|
||||
provided in the bootstrap data (if available), `clone(2)` a child process
|
||||
with the provided clone flags, update the user and group ID mappings, do
|
||||
some further miscellaneous setup steps, and then send the PID of the
|
||||
child process to the parent of the `nsexec()` "caller". Finally,
|
||||
the parent `nsexec()` will exit and the child `nsexec()` process will
|
||||
return to allow the Go runtime to take over.
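
The snippet below is not the libcontainer implementation (which uses a socketpair and a netlink-encoded payload); it is only a self-contained sketch of the underlying pattern: a parent re-execs itself with the "init" argument, hands the read end of a pipe to the child as fd 3, and advertises that fd number through `_LIBCONTAINER_INITPIPE`.

```go
package main

import (
	"fmt"
	"io/ioutil"
	"os"
	"os/exec"
	"strconv"
)

func main() {
	// Child side: entered after the re-exec below, analogous to runc's "init".
	if len(os.Args) > 1 && os.Args[1] == "init" {
		fd, err := strconv.Atoi(os.Getenv("_LIBCONTAINER_INITPIPE"))
		if err != nil {
			panic(err)
		}
		pipe := os.NewFile(uintptr(fd), "initpipe")
		data, _ := ioutil.ReadAll(pipe)
		fmt.Printf("child read %d bytes of bootstrap data\n", len(data))
		return
	}

	// Parent side: create the init pipe and re-exec ourselves.
	r, w, err := os.Pipe()
	if err != nil {
		panic(err)
	}

	cmd := exec.Command("/proc/self/exe", "init")
	cmd.Stdout = os.Stdout
	cmd.Stderr = os.Stderr
	// Entries in ExtraFiles become fd 3, 4, ... in the child process.
	cmd.ExtraFiles = []*os.File{r}
	cmd.Env = append(os.Environ(), "_LIBCONTAINER_INITPIPE=3")

	if err := cmd.Start(); err != nil {
		panic(err)
	}
	r.Close()

	// Stand-in for the real bootstrap payload (namespace paths, clone flags,
	// id mappings, ... -- netlink-encoded in the real implementation).
	w.Write([]byte("bootstrap-data"))
	w.Close()

	if err := cmd.Wait(); err != nil {
		panic(err)
	}
}
```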
|
||||
|
||||
NOTE: We do both `setns(2)` and `clone(2)` even if we don't have any
|
||||
CLONE_NEW* clone flags because we must fork a new process in order to
|
||||
enter the PID namespace.
|
||||
|
||||
|
||||
|
32
vendor/github.com/opencontainers/runc/libcontainer/nsenter/namespace.h
generated
vendored
Normal file
@ -0,0 +1,32 @@
|
||||
#ifndef NSENTER_NAMESPACE_H
|
||||
#define NSENTER_NAMESPACE_H
|
||||
|
||||
#ifndef _GNU_SOURCE
|
||||
# define _GNU_SOURCE
|
||||
#endif
|
||||
#include <sched.h>
|
||||
|
||||
/* All of these are taken from include/uapi/linux/sched.h */
|
||||
#ifndef CLONE_NEWNS
|
||||
# define CLONE_NEWNS 0x00020000 /* New mount namespace group */
|
||||
#endif
|
||||
#ifndef CLONE_NEWCGROUP
|
||||
# define CLONE_NEWCGROUP 0x02000000 /* New cgroup namespace */
|
||||
#endif
|
||||
#ifndef CLONE_NEWUTS
|
||||
# define CLONE_NEWUTS 0x04000000 /* New utsname namespace */
|
||||
#endif
|
||||
#ifndef CLONE_NEWIPC
|
||||
# define CLONE_NEWIPC 0x08000000 /* New ipc namespace */
|
||||
#endif
|
||||
#ifndef CLONE_NEWUSER
|
||||
# define CLONE_NEWUSER 0x10000000 /* New user namespace */
|
||||
#endif
|
||||
#ifndef CLONE_NEWPID
|
||||
# define CLONE_NEWPID 0x20000000 /* New pid namespace */
|
||||
#endif
|
||||
#ifndef CLONE_NEWNET
|
||||
# define CLONE_NEWNET 0x40000000 /* New network namespace */
|
||||
#endif
|
||||
|
||||
#endif /* NSENTER_NAMESPACE_H */
|
12
vendor/github.com/opencontainers/runc/libcontainer/nsenter/nsenter.go
generated
vendored
Normal file
@ -0,0 +1,12 @@
|
||||
// +build linux,!gccgo
|
||||
|
||||
package nsenter
|
||||
|
||||
/*
|
||||
#cgo CFLAGS: -Wall
|
||||
extern void nsexec();
|
||||
void __attribute__((constructor)) init(void) {
|
||||
nsexec();
|
||||
}
|
||||
*/
|
||||
import "C"
|
25
vendor/github.com/opencontainers/runc/libcontainer/nsenter/nsenter_gccgo.go
generated
vendored
Normal file
@ -0,0 +1,25 @@
|
||||
// +build linux,gccgo
|
||||
|
||||
package nsenter
|
||||
|
||||
/*
|
||||
#cgo CFLAGS: -Wall
|
||||
extern void nsexec();
|
||||
void __attribute__((constructor)) init(void) {
|
||||
nsexec();
|
||||
}
|
||||
*/
|
||||
import "C"
|
||||
|
||||
// AlwaysFalse is here to stay false
|
||||
// (and be exported so the compiler doesn't optimize out its reference)
|
||||
var AlwaysFalse bool
|
||||
|
||||
func init() {
|
||||
if AlwaysFalse {
|
||||
// by referencing this C init() in a noop test, it will ensure the compiler
|
||||
// links in the C function.
|
||||
// https://gcc.gnu.org/bugzilla/show_bug.cgi?id=65134
|
||||
C.init()
|
||||
}
|
||||
}
|
5
vendor/github.com/opencontainers/runc/libcontainer/nsenter/nsenter_unsupported.go
generated
vendored
Normal file
@ -0,0 +1,5 @@
|
||||
// +build !linux !cgo
|
||||
|
||||
package nsenter
|
||||
|
||||
import "C"
|
862
vendor/github.com/opencontainers/runc/libcontainer/nsenter/nsexec.c
generated
vendored
Normal file
@ -0,0 +1,862 @@
|
||||
#define _GNU_SOURCE
|
||||
#include <endian.h>
|
||||
#include <errno.h>
|
||||
#include <fcntl.h>
|
||||
#include <grp.h>
|
||||
#include <sched.h>
|
||||
#include <setjmp.h>
|
||||
#include <signal.h>
|
||||
#include <stdarg.h>
|
||||
#include <stdbool.h>
|
||||
#include <stdint.h>
|
||||
#include <stdio.h>
|
||||
#include <stdlib.h>
|
||||
#include <stdbool.h>
|
||||
#include <string.h>
|
||||
#include <unistd.h>
|
||||
|
||||
#include <sys/ioctl.h>
|
||||
#include <sys/prctl.h>
|
||||
#include <sys/socket.h>
|
||||
#include <sys/types.h>
|
||||
|
||||
#include <linux/limits.h>
|
||||
#include <linux/netlink.h>
|
||||
#include <linux/types.h>
|
||||
|
||||
/* Get all of the CLONE_NEW* flags. */
|
||||
#include "namespace.h"
|
||||
|
||||
/* Synchronisation values. */
|
||||
enum sync_t {
|
||||
SYNC_USERMAP_PLS = 0x40, /* Request parent to map our users. */
|
||||
SYNC_USERMAP_ACK = 0x41, /* Mapping finished by the parent. */
|
||||
SYNC_RECVPID_PLS = 0x42, /* Tell parent we're sending the PID. */
|
||||
SYNC_RECVPID_ACK = 0x43, /* PID was correctly received by parent. */
|
||||
SYNC_GRANDCHILD = 0x44, /* The grandchild is ready to run. */
|
||||
SYNC_CHILD_READY = 0x45, /* The child or grandchild is ready to return. */
|
||||
|
||||
/* XXX: This doesn't help with segfaults and other such issues. */
|
||||
SYNC_ERR = 0xFF, /* Fatal error, no turning back. The error code follows. */
|
||||
};
|
||||
|
||||
/* longjmp() arguments. */
|
||||
#define JUMP_PARENT 0x00
|
||||
#define JUMP_CHILD 0xA0
|
||||
#define JUMP_INIT 0xA1
|
||||
|
||||
/* JSON buffer. */
|
||||
#define JSON_MAX 4096
|
||||
|
||||
/* Assume the stack grows down, so arguments should be above it. */
|
||||
struct clone_t {
|
||||
/*
|
||||
* Reserve some space for clone() to locate arguments
|
||||
* and retcode in this place
|
||||
*/
|
||||
char stack[4096] __attribute__ ((aligned(16)));
|
||||
char stack_ptr[0];
|
||||
|
||||
/* There's two children. This is used to execute the different code. */
|
||||
jmp_buf *env;
|
||||
int jmpval;
|
||||
};
|
||||
|
||||
struct nlconfig_t {
|
||||
char *data;
|
||||
uint32_t cloneflags;
|
||||
char *uidmap;
|
||||
size_t uidmap_len;
|
||||
char *gidmap;
|
||||
size_t gidmap_len;
|
||||
char *namespaces;
|
||||
size_t namespaces_len;
|
||||
uint8_t is_setgroup;
|
||||
uint8_t is_rootless;
|
||||
char *oom_score_adj;
|
||||
size_t oom_score_adj_len;
|
||||
};
|
||||
|
||||
/*
|
||||
* List of netlink message types sent to us as part of bootstrapping the init.
|
||||
* These constants are defined in libcontainer/message_linux.go.
|
||||
*/
|
||||
#define INIT_MSG 62000
|
||||
#define CLONE_FLAGS_ATTR 27281
|
||||
#define NS_PATHS_ATTR 27282
|
||||
#define UIDMAP_ATTR 27283
|
||||
#define GIDMAP_ATTR 27284
|
||||
#define SETGROUP_ATTR 27285
|
||||
#define OOM_SCORE_ADJ_ATTR 27286
|
||||
#define ROOTLESS_ATTR 27287
|
||||
|
||||
/*
|
||||
* Use the raw syscall for versions of glibc which don't include a function for
|
||||
* it, namely (glibc 2.12).
|
||||
*/
|
||||
#if __GLIBC__ == 2 && __GLIBC_MINOR__ < 14
|
||||
# define _GNU_SOURCE
|
||||
# include "syscall.h"
|
||||
# if !defined(SYS_setns) && defined(__NR_setns)
|
||||
# define SYS_setns __NR_setns
|
||||
# endif
|
||||
|
||||
#ifndef SYS_setns
|
||||
# error "setns(2) syscall not supported by glibc version"
|
||||
#endif
|
||||
|
||||
int setns(int fd, int nstype)
|
||||
{
|
||||
return syscall(SYS_setns, fd, nstype);
|
||||
}
|
||||
#endif
|
||||
|
||||
/* XXX: This is ugly. */
|
||||
static int syncfd = -1;
|
||||
|
||||
/* TODO(cyphar): Fix this so it correctly deals with syncT. */
|
||||
#define bail(fmt, ...) \
|
||||
do { \
|
||||
int ret = __COUNTER__ + 1; \
|
||||
fprintf(stderr, "nsenter: " fmt ": %m\n", ##__VA_ARGS__); \
|
||||
if (syncfd >= 0) { \
|
||||
enum sync_t s = SYNC_ERR; \
|
||||
if (write(syncfd, &s, sizeof(s)) != sizeof(s)) \
|
||||
fprintf(stderr, "nsenter: failed: write(s)"); \
|
||||
if (write(syncfd, &ret, sizeof(ret)) != sizeof(ret)) \
|
||||
fprintf(stderr, "nsenter: failed: write(ret)"); \
|
||||
} \
|
||||
exit(ret); \
|
||||
} while(0)
|
||||
|
||||
static int write_file(char *data, size_t data_len, char *pathfmt, ...)
|
||||
{
|
||||
int fd, len, ret = 0;
|
||||
char path[PATH_MAX];
|
||||
|
||||
va_list ap;
|
||||
va_start(ap, pathfmt);
|
||||
len = vsnprintf(path, PATH_MAX, pathfmt, ap);
|
||||
va_end(ap);
|
||||
if (len < 0)
|
||||
return -1;
|
||||
|
||||
fd = open(path, O_RDWR);
|
||||
if (fd < 0) {
|
||||
return -1;
|
||||
}
|
||||
|
||||
len = write(fd, data, data_len);
|
||||
if (len != data_len) {
|
||||
ret = -1;
|
||||
goto out;
|
||||
}
|
||||
|
||||
out:
|
||||
close(fd);
|
||||
return ret;
|
||||
}
|
||||
|
||||
enum policy_t {
|
||||
SETGROUPS_DEFAULT = 0,
|
||||
SETGROUPS_ALLOW,
|
||||
SETGROUPS_DENY,
|
||||
};
|
||||
|
||||
/* This *must* be called before we touch gid_map. */
|
||||
static void update_setgroups(int pid, enum policy_t setgroup)
|
||||
{
|
||||
char *policy;
|
||||
|
||||
switch (setgroup) {
|
||||
case SETGROUPS_ALLOW:
|
||||
policy = "allow";
|
||||
break;
|
||||
case SETGROUPS_DENY:
|
||||
policy = "deny";
|
||||
break;
|
||||
case SETGROUPS_DEFAULT:
|
||||
default:
|
||||
/* Nothing to do. */
|
||||
return;
|
||||
}
|
||||
|
||||
if (write_file(policy, strlen(policy), "/proc/%d/setgroups", pid) < 0) {
|
||||
/*
|
||||
* If the kernel is too old to support /proc/pid/setgroups,
|
||||
* open(2) or write(2) will return ENOENT. This is fine.
|
||||
*/
|
||||
if (errno != ENOENT)
|
||||
bail("failed to write '%s' to /proc/%d/setgroups", policy, pid);
|
||||
}
|
||||
}
|
||||
|
||||
static void update_uidmap(int pid, char *map, size_t map_len)
|
||||
{
|
||||
if (map == NULL || map_len <= 0)
|
||||
return;
|
||||
|
||||
if (write_file(map, map_len, "/proc/%d/uid_map", pid) < 0)
|
||||
bail("failed to update /proc/%d/uid_map", pid);
|
||||
}
|
||||
|
||||
static void update_gidmap(int pid, char *map, size_t map_len)
|
||||
{
|
||||
if (map == NULL || map_len <= 0)
|
||||
return;
|
||||
|
||||
if (write_file(map, map_len, "/proc/%d/gid_map", pid) < 0)
|
||||
bail("failed to update /proc/%d/gid_map", pid);
|
||||
}
|
||||
|
||||
static void update_oom_score_adj(char *data, size_t len)
|
||||
{
|
||||
if (data == NULL || len <= 0)
|
||||
return;
|
||||
|
||||
if (write_file(data, len, "/proc/self/oom_score_adj") < 0)
|
||||
bail("failed to update /proc/self/oom_score_adj");
|
||||
}
|
||||
|
||||
/* A dummy function that just jumps to the given jumpval. */
|
||||
static int child_func(void *arg) __attribute__ ((noinline));
|
||||
static int child_func(void *arg)
|
||||
{
|
||||
struct clone_t *ca = (struct clone_t *)arg;
|
||||
longjmp(*ca->env, ca->jmpval);
|
||||
}
|
||||
|
||||
static int clone_parent(jmp_buf *env, int jmpval) __attribute__ ((noinline));
|
||||
static int clone_parent(jmp_buf *env, int jmpval)
|
||||
{
|
||||
struct clone_t ca = {
|
||||
.env = env,
|
||||
.jmpval = jmpval,
|
||||
};
|
||||
|
||||
return clone(child_func, ca.stack_ptr, CLONE_PARENT | SIGCHLD, &ca);
|
||||
}
|
||||
|
||||
/*
|
||||
* Gets the init pipe fd from the environment, which is used to read the
|
||||
* bootstrap data and tell the parent what the new pid is after we finish
|
||||
* setting up the environment.
|
||||
*/
|
||||
static int initpipe(void)
|
||||
{
|
||||
int pipenum;
|
||||
char *initpipe, *endptr;
|
||||
|
||||
initpipe = getenv("_LIBCONTAINER_INITPIPE");
|
||||
if (initpipe == NULL || *initpipe == '\0')
|
||||
return -1;
|
||||
|
||||
pipenum = strtol(initpipe, &endptr, 10);
|
||||
if (*endptr != '\0')
|
||||
bail("unable to parse _LIBCONTAINER_INITPIPE");
|
||||
|
||||
return pipenum;
|
||||
}
|
||||
|
||||
/* Returns the clone(2) flag for a namespace, given the name of a namespace. */
|
||||
static int nsflag(char *name)
|
||||
{
|
||||
if (!strcmp(name, "cgroup"))
|
||||
return CLONE_NEWCGROUP;
|
||||
else if (!strcmp(name, "ipc"))
|
||||
return CLONE_NEWIPC;
|
||||
else if (!strcmp(name, "mnt"))
|
||||
return CLONE_NEWNS;
|
||||
else if (!strcmp(name, "net"))
|
||||
return CLONE_NEWNET;
|
||||
else if (!strcmp(name, "pid"))
|
||||
return CLONE_NEWPID;
|
||||
else if (!strcmp(name, "user"))
|
||||
return CLONE_NEWUSER;
|
||||
else if (!strcmp(name, "uts"))
|
||||
return CLONE_NEWUTS;
|
||||
|
||||
/* If we don't recognise a name, fallback to 0. */
|
||||
return 0;
|
||||
}
|
||||
|
||||
static uint32_t readint32(char *buf)
|
||||
{
|
||||
return *(uint32_t *) buf;
|
||||
}
|
||||
|
||||
static uint8_t readint8(char *buf)
|
||||
{
|
||||
return *(uint8_t *) buf;
|
||||
}
|
||||
|
||||
static void nl_parse(int fd, struct nlconfig_t *config)
|
||||
{
|
||||
size_t len, size;
|
||||
struct nlmsghdr hdr;
|
||||
char *data, *current;
|
||||
|
||||
/* Retrieve the netlink header. */
|
||||
len = read(fd, &hdr, NLMSG_HDRLEN);
|
||||
if (len != NLMSG_HDRLEN)
|
||||
bail("invalid netlink header length %zu", len);
|
||||
|
||||
if (hdr.nlmsg_type == NLMSG_ERROR)
|
||||
bail("failed to read netlink message");
|
||||
|
||||
if (hdr.nlmsg_type != INIT_MSG)
|
||||
bail("unexpected msg type %d", hdr.nlmsg_type);
|
||||
|
||||
/* Retrieve data. */
|
||||
size = NLMSG_PAYLOAD(&hdr, 0);
|
||||
current = data = malloc(size);
|
||||
if (!data)
|
||||
bail("failed to allocate %zu bytes of memory for nl_payload", size);
|
||||
|
||||
len = read(fd, data, size);
|
||||
if (len != size)
|
||||
bail("failed to read netlink payload, %zu != %zu", len, size);
|
||||
|
||||
/* Parse the netlink payload. */
|
||||
config->data = data;
|
||||
while (current < data + size) {
|
||||
struct nlattr *nlattr = (struct nlattr *)current;
|
||||
size_t payload_len = nlattr->nla_len - NLA_HDRLEN;
|
||||
|
||||
/* Advance to payload. */
|
||||
current += NLA_HDRLEN;
|
||||
|
||||
/* Handle payload. */
|
||||
switch (nlattr->nla_type) {
|
||||
case CLONE_FLAGS_ATTR:
|
||||
config->cloneflags = readint32(current);
|
||||
break;
|
||||
case ROOTLESS_ATTR:
|
||||
config->is_rootless = readint8(current);
|
||||
break;
|
||||
case OOM_SCORE_ADJ_ATTR:
|
||||
config->oom_score_adj = current;
|
||||
config->oom_score_adj_len = payload_len;
|
||||
break;
|
||||
case NS_PATHS_ATTR:
|
||||
config->namespaces = current;
|
||||
config->namespaces_len = payload_len;
|
||||
break;
|
||||
case UIDMAP_ATTR:
|
||||
config->uidmap = current;
|
||||
config->uidmap_len = payload_len;
|
||||
break;
|
||||
case GIDMAP_ATTR:
|
||||
config->gidmap = current;
|
||||
config->gidmap_len = payload_len;
|
||||
break;
|
||||
case SETGROUP_ATTR:
|
||||
config->is_setgroup = readint8(current);
|
||||
break;
|
||||
default:
|
||||
bail("unknown netlink message type %d", nlattr->nla_type);
|
||||
}
|
||||
|
||||
current += NLA_ALIGN(payload_len);
|
||||
}
|
||||
}
|
||||
|
||||
void nl_free(struct nlconfig_t *config)
|
||||
{
|
||||
free(config->data);
|
||||
}
|
||||
|
||||
void join_namespaces(char *nslist)
|
||||
{
|
||||
int num = 0, i;
|
||||
char *saveptr = NULL;
|
||||
char *namespace = strtok_r(nslist, ",", &saveptr);
|
||||
struct namespace_t {
|
||||
int fd;
|
||||
int ns;
|
||||
char type[PATH_MAX];
|
||||
char path[PATH_MAX];
|
||||
} *namespaces = NULL;
|
||||
|
||||
if (!namespace || !strlen(namespace) || !strlen(nslist))
|
||||
bail("ns paths are empty");
|
||||
|
||||
/*
|
||||
* We have to open the file descriptors first, since after
|
||||
* we join the mnt namespace we might no longer be able to
|
||||
* access the paths.
|
||||
*/
|
||||
do {
|
||||
int fd;
|
||||
char *path;
|
||||
struct namespace_t *ns;
|
||||
|
||||
/* Resize the namespace array. */
|
||||
namespaces = realloc(namespaces, ++num * sizeof(struct namespace_t));
|
||||
if (!namespaces)
|
||||
bail("failed to reallocate namespace array");
|
||||
ns = &namespaces[num - 1];
|
||||
|
||||
/* Split 'ns:path'. */
|
||||
path = strstr(namespace, ":");
|
||||
if (!path)
|
||||
bail("failed to parse %s", namespace);
|
||||
*path++ = '\0';
|
||||
|
||||
fd = open(path, O_RDONLY);
|
||||
if (fd < 0)
|
||||
bail("failed to open %s", path);
|
||||
|
||||
ns->fd = fd;
|
||||
ns->ns = nsflag(namespace);
|
||||
strncpy(ns->path, path, PATH_MAX);
|
||||
} while ((namespace = strtok_r(NULL, ",", &saveptr)) != NULL);
|
||||
|
||||
/*
|
||||
* The ordering in which we join namespaces is important. We should
|
||||
* always join the user namespace *first*. This is all guaranteed
|
||||
* from the container_linux.go side of this, so we're just going to
|
||||
* follow the order given to us.
|
||||
*/
|
||||
|
||||
for (i = 0; i < num; i++) {
|
||||
struct namespace_t ns = namespaces[i];
|
||||
|
||||
if (setns(ns.fd, ns.ns) < 0)
|
||||
bail("failed to setns to %s", ns.path);
|
||||
|
||||
close(ns.fd);
|
||||
}
|
||||
|
||||
free(namespaces);
|
||||
}
|
||||
|
||||
void nsexec(void)
|
||||
{
|
||||
int pipenum;
|
||||
jmp_buf env;
|
||||
int sync_child_pipe[2], sync_grandchild_pipe[2];
|
||||
struct nlconfig_t config = {0};
|
||||
|
||||
/*
|
||||
* If we don't have an init pipe, just return to the go routine.
|
||||
* We'll only get an init pipe for start or exec.
|
||||
*/
|
||||
pipenum = initpipe();
|
||||
if (pipenum == -1)
|
||||
return;
|
||||
|
||||
/* Parse all of the netlink configuration. */
|
||||
nl_parse(pipenum, &config);
|
||||
|
||||
/* Set oom_score_adj. This has to be done before !dumpable because
|
||||
* /proc/self/oom_score_adj is not writeable unless you're a privileged
|
||||
* user (if !dumpable is set). All children inherit their parent's
|
||||
* oom_score_adj value on fork(2) so this will always be propagated
|
||||
* properly.
|
||||
*/
|
||||
update_oom_score_adj(config.oom_score_adj, config.oom_score_adj_len);
|
||||
|
||||
/*
|
||||
* Make the process non-dumpable, to avoid various race conditions that
|
||||
* could cause processes in namespaces we're joining to access host
|
||||
* resources (or potentially execute code).
|
||||
*
|
||||
* However, if the number of namespaces we are joining is 0, we are not
|
||||
* going to be switching to a different security context. Thus setting
|
||||
* ourselves to be non-dumpable only breaks things (like rootless
|
||||
* containers), which is the recommendation from the kernel folks.
|
||||
*/
|
||||
if (config.namespaces) {
|
||||
if (prctl(PR_SET_DUMPABLE, 0, 0, 0, 0) < 0)
|
||||
bail("failed to set process as non-dumpable");
|
||||
}
|
||||
|
||||
/* Pipe so we can tell the child when we've finished setting up. */
|
||||
if (socketpair(AF_LOCAL, SOCK_STREAM, 0, sync_child_pipe) < 0)
|
||||
bail("failed to setup sync pipe between parent and child");
|
||||
|
||||
/*
|
||||
* We need a new socketpair to sync with grandchild so we don't have
|
||||
* race condition with child.
|
||||
*/
|
||||
if (socketpair(AF_LOCAL, SOCK_STREAM, 0, sync_grandchild_pipe) < 0)
|
||||
bail("failed to setup sync pipe between parent and grandchild");
|
||||
|
||||
/* TODO: Currently we aren't dealing with child deaths properly. */
|
||||
|
||||
/*
|
||||
* Okay, so this is quite annoying.
|
||||
*
|
||||
* In order for this unsharing code to be more extensible we need to split
|
||||
* up unshare(CLONE_NEWUSER) and clone() in various ways. The ideal case
|
||||
* would be if we did clone(CLONE_NEWUSER) and the other namespaces
|
||||
* separately, but because of SELinux issues we cannot really do that. But
|
||||
* we cannot just dump the namespace flags into clone(...) because several
|
||||
* usecases (such as rootless containers) require more granularity around
|
||||
* the namespace setup. In addition, some older kernels had issues where
|
||||
* CLONE_NEWUSER wasn't handled before other namespaces (but we cannot
|
||||
* handle this while also dealing with SELinux so we choose SELinux support
|
||||
* over broken kernel support).
|
||||
*
|
||||
* However, if we unshare(2) the user namespace *before* we clone(2), then
|
||||
* all hell breaks loose.
|
||||
*
|
||||
* The parent no longer has permissions to do many things (unshare(2) drops
|
||||
* all capabilities in your old namespace), and the container cannot be set
|
||||
* up to have more than one {uid,gid} mapping. This is obviously less than
|
||||
* ideal. In order to fix this, we have to first clone(2) and then unshare.
|
||||
*
|
||||
* Unfortunately, it's not as simple as that. We have to fork to enter the
|
||||
* PID namespace (the PID namespace only applies to children). Since we'll
|
||||
* have to double-fork, this clone_parent() call won't be able to get the
|
||||
* PID of the _actual_ init process (without doing more synchronisation than
|
||||
* I can deal with at the moment). So we'll just get the parent to send it
|
||||
* for us, the only job of this process is to update
|
||||
* /proc/pid/{setgroups,uid_map,gid_map}.
|
||||
*
|
||||
* And as a result of the above, we also need to setns(2) in the first child
|
||||
* because if we join a PID namespace in the topmost parent then our child
|
||||
* will be in that namespace (and it will not be able to give us a PID value
|
||||
* that makes sense without resorting to sending things with cmsg).
|
||||
*
|
||||
* This also deals with an older issue caused by dumping cloneflags into
|
||||
* clone(2): On old kernels, CLONE_PARENT didn't work with CLONE_NEWPID, so
|
||||
* we have to unshare(2) before clone(2) in order to do this. This was fixed
|
||||
* in upstream commit 1f7f4dde5c945f41a7abc2285be43d918029ecc5, and was
|
||||
* introduced by 40a0d32d1eaffe6aac7324ca92604b6b3977eb0e. As far as we're
|
||||
* aware, the last mainline kernel which had this bug was Linux 3.12.
|
||||
* However, we cannot comment on which kernels the broken patch was
|
||||
* backported to.
|
||||
*
|
||||
* -- Aleksa "what has my life come to?" Sarai
|
||||
*/
|
||||
|
||||
switch (setjmp(env)) {
|
||||
/*
|
||||
* Stage 0: We're in the parent. Our job is just to create a new child
|
||||
* (stage 1: JUMP_CHILD) process and write its uid_map and
|
||||
* gid_map. That process will go on to create a new process, then
|
||||
* it will send us its PID which we will send to the bootstrap
|
||||
* process.
|
||||
*/
|
||||
case JUMP_PARENT: {
|
||||
int len;
|
||||
pid_t child;
|
||||
char buf[JSON_MAX];
|
||||
bool ready = false;
|
||||
|
||||
/* For debugging. */
|
||||
prctl(PR_SET_NAME, (unsigned long) "runc:[0:PARENT]", 0, 0, 0);
|
||||
|
||||
/* Start the process of getting a container. */
|
||||
child = clone_parent(&env, JUMP_CHILD);
|
||||
if (child < 0)
|
||||
bail("unable to fork: child_func");
|
||||
|
||||
/*
|
||||
* State machine for synchronisation with the children.
|
||||
*
|
||||
* The parent only returns when both child and grandchild are
|
||||
* ready, so we can receive all possible error codes
|
||||
* generated by children.
|
||||
*/
|
||||
while (!ready) {
|
||||
enum sync_t s;
|
||||
int ret;
|
||||
|
||||
syncfd = sync_child_pipe[1];
|
||||
close(sync_child_pipe[0]);
|
||||
|
||||
if (read(syncfd, &s, sizeof(s)) != sizeof(s))
|
||||
bail("failed to sync with child: next state");
|
||||
|
||||
switch (s) {
|
||||
case SYNC_ERR:
|
||||
/* We have to mirror the error code of the child. */
|
||||
if (read(syncfd, &ret, sizeof(ret)) != sizeof(ret))
|
||||
bail("failed to sync with child: read(error code)");
|
||||
|
||||
exit(ret);
|
||||
case SYNC_USERMAP_PLS:
|
||||
/*
|
||||
* Enable setgroups(2) if we've been asked to. But we also
|
||||
* have to explicitly disable setgroups(2) if we're
|
||||
* creating a rootless container (this is required since
|
||||
* Linux 3.19).
|
||||
*/
|
||||
if (config.is_rootless && config.is_setgroup) {
|
||||
kill(child, SIGKILL);
|
||||
bail("cannot allow setgroup in an unprivileged user namespace setup");
|
||||
}
|
||||
|
||||
if (config.is_setgroup)
|
||||
update_setgroups(child, SETGROUPS_ALLOW);
|
||||
if (config.is_rootless)
|
||||
update_setgroups(child, SETGROUPS_DENY);
|
||||
|
||||
/* Set up mappings. */
|
||||
update_uidmap(child, config.uidmap, config.uidmap_len);
|
||||
update_gidmap(child, config.gidmap, config.gidmap_len);
|
||||
|
||||
s = SYNC_USERMAP_ACK;
|
||||
if (write(syncfd, &s, sizeof(s)) != sizeof(s)) {
|
||||
kill(child, SIGKILL);
|
||||
bail("failed to sync with child: write(SYNC_USERMAP_ACK)");
|
||||
}
|
||||
break;
|
||||
case SYNC_RECVPID_PLS: {
|
||||
pid_t old = child;
|
||||
|
||||
/* Get the init_func pid. */
|
||||
if (read(syncfd, &child, sizeof(child)) != sizeof(child)) {
|
||||
kill(old, SIGKILL);
|
||||
bail("failed to sync with child: read(childpid)");
|
||||
}
|
||||
|
||||
/* Send ACK. */
|
||||
s = SYNC_RECVPID_ACK;
|
||||
if (write(syncfd, &s, sizeof(s)) != sizeof(s)) {
|
||||
kill(old, SIGKILL);
|
||||
kill(child, SIGKILL);
|
||||
bail("failed to sync with child: write(SYNC_RECVPID_ACK)");
|
||||
}
|
||||
}
|
||||
break;
|
||||
case SYNC_CHILD_READY:
|
||||
ready = true;
|
||||
break;
|
||||
default:
|
||||
bail("unexpected sync value: %u", s);
|
||||
}
|
||||
}
|
||||
|
||||
/* Now sync with grandchild. */
|
||||
|
||||
ready = false;
|
||||
while (!ready) {
|
||||
enum sync_t s;
|
||||
int ret;
|
||||
|
||||
syncfd = sync_grandchild_pipe[1];
|
||||
close(sync_grandchild_pipe[0]);
|
||||
|
||||
s = SYNC_GRANDCHILD;
|
||||
if (write(syncfd, &s, sizeof(s)) != sizeof(s)) {
|
||||
kill(child, SIGKILL);
|
||||
bail("failed to sync with child: write(SYNC_GRANDCHILD)");
|
||||
}
|
||||
|
||||
if (read(syncfd, &s, sizeof(s)) != sizeof(s))
|
||||
bail("failed to sync with child: next state");
|
||||
|
||||
switch (s) {
|
||||
case SYNC_ERR:
|
||||
/* We have to mirror the error code of the child. */
|
||||
if (read(syncfd, &ret, sizeof(ret)) != sizeof(ret))
|
||||
bail("failed to sync with child: read(error code)");
|
||||
|
||||
exit(ret);
|
||||
case SYNC_CHILD_READY:
|
||||
ready = true;
|
||||
break;
|
||||
default:
|
||||
bail("unexpected sync value: %u", s);
|
||||
}
|
||||
}
|
||||
|
||||
/* Send the init_func pid back to our parent. */
|
||||
len = snprintf(buf, JSON_MAX, "{\"pid\": %d}\n", child);
|
||||
if (len < 0) {
|
||||
kill(child, SIGKILL);
|
||||
bail("unable to generate JSON for child pid");
|
||||
}
|
||||
if (write(pipenum, buf, len) != len) {
|
||||
kill(child, SIGKILL);
|
||||
bail("unable to send child pid to bootstrapper");
|
||||
}
|
||||
|
||||
exit(0);
|
||||
}
|
||||
|
||||
/*
|
||||
* Stage 1: We're in the first child process. Our job is to join any
|
||||
* provided namespaces in the netlink payload and unshare all
|
||||
* of the requested namespaces. If we've been asked to
|
||||
* CLONE_NEWUSER, we will ask our parent (stage 0) to set up
|
||||
* our user mappings for us. Then, we create a new child
|
||||
* (stage 2: JUMP_INIT) for PID namespace. We then send the
|
||||
* child's PID to our parent (stage 0).
|
||||
*/
|
||||
case JUMP_CHILD: {
|
||||
pid_t child;
|
||||
enum sync_t s;
|
||||
|
||||
/* We're in a child and thus need to tell the parent if we die. */
|
||||
syncfd = sync_child_pipe[0];
|
||||
close(sync_child_pipe[1]);
|
||||
|
||||
/* For debugging. */
|
||||
prctl(PR_SET_NAME, (unsigned long) "runc:[1:CHILD]", 0, 0, 0);
|
||||
|
||||
/*
|
||||
* We need to setns first. We cannot do this earlier (in stage 0)
|
||||
* because of the fact that we forked to get here (the PID of
|
||||
* [stage 2: JUMP_INIT] would be meaningless). We could send it
|
||||
* using cmsg(3) but that's just annoying.
|
||||
*/
|
||||
if (config.namespaces)
|
||||
join_namespaces(config.namespaces);
|
||||
|
||||
/*
|
||||
* Unshare all of the namespaces. Now, it should be noted that this
|
||||
* ordering might break in the future (especially with rootless
|
||||
* containers). But for now, it's not possible to split this into
|
||||
* CLONE_NEWUSER + [the rest] because of some RHEL SELinux issues.
|
||||
*
|
||||
* Note that we don't merge this with clone() because there were
|
||||
* some old kernel versions where clone(CLONE_PARENT | CLONE_NEWPID)
|
||||
* was broken, so we'll just do it the long way anyway.
|
||||
*/
|
||||
if (unshare(config.cloneflags) < 0)
|
||||
bail("failed to unshare namespaces");
|
||||
|
||||
/*
|
||||
* Deal with user namespaces first. They are quite special, as they
|
||||
* affect our ability to unshare other namespaces and are used as
|
||||
* context for privilege checks.
|
||||
*/
|
||||
if (config.cloneflags & CLONE_NEWUSER) {
|
||||
/*
|
||||
* We don't have the privileges to do any mapping here (see the
|
||||
* clone_parent rant). So signal our parent to hook us up.
|
||||
*/
|
||||
|
||||
/* Switching is only necessary if we joined namespaces. */
|
||||
if (config.namespaces) {
|
||||
if (prctl(PR_SET_DUMPABLE, 1, 0, 0, 0) < 0)
|
||||
bail("failed to set process as dumpable");
|
||||
}
|
||||
s = SYNC_USERMAP_PLS;
|
||||
if (write(syncfd, &s, sizeof(s)) != sizeof(s))
|
||||
bail("failed to sync with parent: write(SYNC_USERMAP_PLS)");
|
||||
|
||||
/* ... wait for mapping ... */
|
||||
|
||||
if (read(syncfd, &s, sizeof(s)) != sizeof(s))
|
||||
bail("failed to sync with parent: read(SYNC_USERMAP_ACK)");
|
||||
if (s != SYNC_USERMAP_ACK)
|
||||
bail("failed to sync with parent: SYNC_USERMAP_ACK: got %u", s);
|
||||
/* Switching is only necessary if we joined namespaces. */
|
||||
if (config.namespaces) {
|
||||
if (prctl(PR_SET_DUMPABLE, 0, 0, 0, 0) < 0)
|
||||
bail("failed to set process as dumpable");
|
||||
}
|
||||
}
|
||||
|
||||
/*
|
||||
* TODO: What about non-namespace clone flags that we're dropping here?
|
||||
*
|
||||
* We fork again because of PID namespace, setns(2) or unshare(2) don't
|
||||
* change the PID namespace of the calling process, because doing so
|
||||
* would change the caller's idea of its own PID (as reported by getpid()),
|
||||
* which would break many applications and libraries, so we must fork
|
||||
* to actually enter the new PID namespace.
|
||||
		 */
		child = clone_parent(&env, JUMP_INIT);
		if (child < 0)
			bail("unable to fork: init_func");

		/* Send the child to our parent, which knows what it's doing. */
		s = SYNC_RECVPID_PLS;
		if (write(syncfd, &s, sizeof(s)) != sizeof(s)) {
			kill(child, SIGKILL);
			bail("failed to sync with parent: write(SYNC_RECVPID_PLS)");
		}
		if (write(syncfd, &child, sizeof(child)) != sizeof(child)) {
			kill(child, SIGKILL);
			bail("failed to sync with parent: write(childpid)");
		}

		/* ... wait for parent to get the pid ... */

		if (read(syncfd, &s, sizeof(s)) != sizeof(s)) {
			kill(child, SIGKILL);
			bail("failed to sync with parent: read(SYNC_RECVPID_ACK)");
		}
		if (s != SYNC_RECVPID_ACK) {
			kill(child, SIGKILL);
			bail("failed to sync with parent: SYNC_RECVPID_ACK: got %u", s);
		}

		s = SYNC_CHILD_READY;
		if (write(syncfd, &s, sizeof(s)) != sizeof(s)) {
			kill(child, SIGKILL);
			bail("failed to sync with parent: write(SYNC_CHILD_READY)");
		}

		/* Our work is done. [Stage 2: JUMP_INIT] is doing the rest of the work. */
		exit(0);
	}

	/*
	 * Stage 2: We're the final child process, and the only process that will
	 * actually return to the Go runtime. Our job is to just do the
	 * final cleanup steps and then return to the Go runtime to allow
	 * init_linux.go to run.
	 */
	case JUMP_INIT: {
		/*
		 * We're inside the child now, having jumped from the
		 * start_child() code after forking in the parent.
		 */
		enum sync_t s;

		/* We're in a child and thus need to tell the parent if we die. */
		syncfd = sync_grandchild_pipe[0];
		close(sync_grandchild_pipe[1]);
		close(sync_child_pipe[0]);
		close(sync_child_pipe[1]);

		/* For debugging. */
		prctl(PR_SET_NAME, (unsigned long) "runc:[2:INIT]", 0, 0, 0);

		if (read(syncfd, &s, sizeof(s)) != sizeof(s))
			bail("failed to sync with parent: read(SYNC_GRANDCHILD)");
		if (s != SYNC_GRANDCHILD)
			bail("failed to sync with parent: SYNC_GRANDCHILD: got %u", s);

		if (setsid() < 0)
			bail("setsid failed");

		if (setuid(0) < 0)
			bail("setuid failed");

		if (setgid(0) < 0)
			bail("setgid failed");

		if (!config.is_rootless && config.is_setgroup) {
			if (setgroups(0, NULL) < 0)
				bail("setgroups failed");
		}

		s = SYNC_CHILD_READY;
		if (write(syncfd, &s, sizeof(s)) != sizeof(s))
			bail("failed to sync with parent: write(SYNC_CHILD_READY)");

		/* Close sync pipes. */
		close(sync_grandchild_pipe[0]);

		/* Free netlink data. */
		nl_free(&config);

		/* Finish executing, let the Go runtime take over. */
		return;
	}
	default:
		bail("unexpected jump value");
	}

	/* Should never be reached. */
	bail("should never be reached");
}
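The child side above drives a small request/acknowledge protocol over the sync pipe (SYNC_USERMAP_PLS, SYNC_RECVPID_PLS, SYNC_CHILD_READY, and so on). The parent-side loop is not part of this hunk; the following is a minimal, hypothetical sketch of what such a loop looks like, assuming the `sync_t` constants and the `bail()` helper defined elsewhere in nsexec.c. The function name and the user-mapping stub are invented for illustration; this is not the vendored code.

```c
#include <sys/types.h>
#include <unistd.h>

/*
 * Hypothetical sketch of the parent (stage 0) side of the handshake shown
 * above. Assumes the sync_t constants and bail() from nsexec.c.
 */
static pid_t wait_for_stage1(int syncfd)
{
	enum sync_t s;
	pid_t grandchild = -1;

	for (;;) {
		if (read(syncfd, &s, sizeof(s)) != sizeof(s))
			bail("failed to read sync byte from child");

		switch (s) {
		case SYNC_USERMAP_PLS:
			/* Write /proc/<child>/uid_map and gid_map here, then ack. */
			s = SYNC_USERMAP_ACK;
			if (write(syncfd, &s, sizeof(s)) != sizeof(s))
				bail("failed to ack user mapping");
			break;
		case SYNC_RECVPID_PLS:
			/* The child immediately follows this request with the PID. */
			if (read(syncfd, &grandchild, sizeof(grandchild)) != sizeof(grandchild))
				bail("failed to read grandchild pid");
			s = SYNC_RECVPID_ACK;
			if (write(syncfd, &s, sizeof(s)) != sizeof(s))
				bail("failed to ack grandchild pid");
			break;
		case SYNC_CHILD_READY:
			/* Stage 1 is finished; the PID to report is the grandchild's. */
			return grandchild;
		default:
			bail("unexpected sync value: %u", s);
		}
	}
}
```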
vendor/github.com/opencontainers/runc/vendor.conf (generated, vendored, new file; 21 lines)
@@ -0,0 +1,21 @@
# OCI runtime-spec. When updating this, make sure you use a version tag rather
# than a commit ID so it's much more obvious what version of the spec we are
# using.
github.com/opencontainers/runtime-spec v1.0.0
# Core libcontainer functionality.
github.com/mrunalp/fileutils ed869b029674c0e9ce4c0dfa781405c2d9946d08
github.com/opencontainers/selinux v1.0.0-rc1
github.com/seccomp/libseccomp-golang 32f571b70023028bd57d9288c20efbcb237f3ce0
github.com/sirupsen/logrus a3f95b5c423586578a4e099b11a46c2479628cac
github.com/syndtr/gocapability db04d3cc01c8b54962a58ec7e491717d06cfcc16
github.com/vishvananda/netlink 1e2e08e8a2dcdacaae3f14ac44c5cfa31361f270
# systemd integration.
github.com/coreos/go-systemd v14
github.com/coreos/pkg v3
github.com/godbus/dbus v3
github.com/golang/protobuf 18c9bb3261723cd5401db4d0c9fbc5c3b6c70fe8
# Command-line interface.
github.com/docker/docker 0f5c9d301b9b1cca66b3ea0f9dec3b5317d3686d
github.com/docker/go-units v0.2.0
github.com/urfave/cli d53eb991652b1d438abdd34ce4bfa3ef1539108e
golang.org/x/sys 0e0164865330d5cf1c00247be08330bf96e2f87c https://github.com/golang/sys
vendor/github.com/opencontainers/runtime-spec/README.md (generated, vendored, new file; 158 lines)
@@ -0,0 +1,158 @@
# Open Container Initiative Runtime Specification

The [Open Container Initiative][oci] develops specifications for standards on Operating System process and application containers.

The specification can be found [here](spec.md).

## Table of Contents

Additional documentation about how this group operates:

- [Code of Conduct][code-of-conduct]
- [Style and Conventions](style.md)
- [Implementations](implementations.md)
- [Releases](RELEASES.md)
- [project](project.md)
- [charter][charter]

## Use Cases

To provide context for users the following section gives example use cases for each part of the spec.

### Application Bundle Builders

Application bundle builders can create a [bundle](bundle.md) directory that includes all of the files required for launching an application as a container.
The bundle contains an OCI [configuration file](config.md) where the builder can specify host-independent details such as [which executable to launch](config.md#process) and host-specific settings such as [mount](config.md#mounts) locations, [hook](config.md#hooks) paths, Linux [namespaces](config-linux.md#namespaces) and [cgroups](config-linux.md#control-groups).
Because the configuration includes host-specific settings, application bundle directories copied between two hosts may require configuration adjustments.

### Hook Developers

[Hook](config.md#hooks) developers can extend the functionality of an OCI-compliant runtime by hooking into a container's lifecycle with an external application.
Example use cases include sophisticated network configuration, volume garbage collection, etc.

### Runtime Developers

Runtime developers can build runtime implementations that run OCI-compliant bundles and container configuration, containing low-level OS and host-specific details, on a particular platform.

## Contributing

Development happens on GitHub for the spec.
Issues are used for bugs and actionable items and longer discussions can happen on the [mailing list](#mailing-list).

The specification and code is licensed under the Apache 2.0 license found in the [LICENSE](./LICENSE) file.

### Discuss your design

The project welcomes submissions, but please let everyone know what you are working on.

Before undertaking a nontrivial change to this specification, send mail to the [mailing list](#mailing-list) to discuss what you plan to do.
This gives everyone a chance to validate the design, helps prevent duplication of effort, and ensures that the idea fits.
It also guarantees that the design is sound before code is written; a GitHub pull-request is not the place for high-level discussions.

Typos and grammatical errors can go straight to a pull-request.
When in doubt, start on the [mailing-list](#mailing-list).

### Weekly Call

The contributors and maintainers of all OCI projects have a weekly meeting on Wednesdays at:

* 8:00 AM (USA Pacific), during [odd weeks][iso-week].
* 2:00 PM (USA Pacific), during [even weeks][iso-week].

There is an [iCalendar][rfc5545] format for the meetings [here](meeting.ics).

Everyone is welcome to participate via [UberConference web][uberconference] or audio-only: +1 415 968 0849 (no PIN needed).
An initial agenda will be posted to the [mailing list](#mailing-list) earlier in the week, and everyone is welcome to propose additional topics or suggest other agenda alterations there.
Minutes are posted to the [mailing list](#mailing-list) and minutes from past calls are archived [here][minutes], with minutes from especially old meetings (September 2015 and earlier) archived [here][runtime-wiki].

### Mailing List

You can subscribe and join the mailing list on [Google Groups][dev-list].

### IRC

OCI discussion happens on #opencontainers on Freenode ([logs][irc-logs]).

### Git commit

#### Sign your work

The sign-off is a simple line at the end of the explanation for the patch, which certifies that you wrote it or otherwise have the right to pass it on as an open-source patch.
The rules are pretty simple: if you can certify the below (from http://developercertificate.org):

```
Developer Certificate of Origin
Version 1.1

Copyright (C) 2004, 2006 The Linux Foundation and its contributors.
660 York Street, Suite 102,
San Francisco, CA 94110 USA

Everyone is permitted to copy and distribute verbatim copies of this
license document, but changing it is not allowed.


Developer's Certificate of Origin 1.1

By making a contribution to this project, I certify that:

(a) The contribution was created in whole or in part by me and I
    have the right to submit it under the open source license
    indicated in the file; or

(b) The contribution is based upon previous work that, to the best
    of my knowledge, is covered under an appropriate open source
    license and I have the right under that license to submit that
    work with modifications, whether created in whole or in part
    by me, under the same open source license (unless I am
    permitted to submit under a different license), as indicated
    in the file; or

(c) The contribution was provided directly to me by some other
    person who certified (a), (b) or (c) and I have not modified
    it.

(d) I understand and agree that this project and the contribution
    are public and that a record of the contribution (including all
    personal information I submit with it, including my sign-off) is
    maintained indefinitely and may be redistributed consistent with
    this project or the open source license(s) involved.
```

then you just add a line to every git commit message:

    Signed-off-by: Joe Smith <joe@gmail.com>

using your real name (sorry, no pseudonyms or anonymous contributions.)

You can add the sign off when creating the git commit via `git commit -s`.

#### Commit Style

Simple house-keeping for clean git history.
Read more on [How to Write a Git Commit Message][how-to-git-commit] or the Discussion section of [git-commit(1)][git-commit.1].

1. Separate the subject from body with a blank line
2. Limit the subject line to 50 characters
3. Capitalize the subject line
4. Do not end the subject line with a period
5. Use the imperative mood in the subject line
6. Wrap the body at 72 characters
7. Use the body to explain what and why vs. how
    * If there was important/useful/essential conversation or information, copy or include a reference
8. When possible, one keyword to scope the change in the subject (i.e. "README: ...", "runtime: ...")


[charter]: https://www.opencontainers.org/about/governance
[code-of-conduct]: https://github.com/opencontainers/tob/blob/master/code-of-conduct.md
[dev-list]: https://groups.google.com/a/opencontainers.org/forum/#!forum/dev
[how-to-git-commit]: http://chris.beams.io/posts/git-commit
[irc-logs]: http://ircbot.wl.linuxfoundation.org/eavesdrop/%23opencontainers/
[iso-week]: https://en.wikipedia.org/wiki/ISO_week_date#Calculating_the_week_number_of_a_given_date
[minutes]: http://ircbot.wl.linuxfoundation.org/meetings/opencontainers/
[oci]: https://www.opencontainers.org
[rfc5545]: https://tools.ietf.org/html/rfc5545
[runtime-wiki]: https://github.com/opencontainers/runtime-spec/wiki
[uberconference]: https://www.uberconference.com/opencontainers

[git-commit.1]: http://git-scm.com/docs/git-commit
vendor/github.com/opencontainers/runtime-tools/README.md (generated, vendored, new file; 84 lines)
@@ -0,0 +1,84 @@
# oci-runtime-tool [](https://travis-ci.org/opencontainers/runtime-tools) [](https://goreportcard.com/report/github.com/opencontainers/runtime-tools)

oci-runtime-tool is a collection of tools for working with the [OCI runtime specification][runtime-spec].
To build from source code, runtime-tools requires Go 1.7.x or above.

## Generating an OCI runtime spec configuration file

[`oci-runtime-tool generate`][generate.1] generates [configuration JSON][config.json] for an [OCI bundle][bundle].
[OCI-compatible runtimes][runtime-spec] like [runC][] expect to read the configuration from `config.json`.

```sh
$ oci-runtime-tool generate --output config.json
$ cat config.json
{
	"ociVersion": "0.5.0",
	…
}
```

## Validating an OCI bundle

[`oci-runtime-tool validate`][validate.1] validates an OCI bundle.
The error message will be printed if the OCI bundle fails the validation procedure.

```sh
$ oci-runtime-tool generate
$ oci-runtime-tool validate
INFO[0000] Bundle validation succeeded.
```

## Testing OCI runtimes

```sh
$ sudo make RUNTIME=runc localvalidation
RUNTIME=runc go test -tags "" -v github.com/opencontainers/runtime-tools/validation
=== RUN TestValidateBasic
TAP version 13
ok 1 - root filesystem
ok 2 - hostname
ok 3 - mounts
ok 4 - capabilities
ok 5 - default symlinks
ok 6 - default devices
ok 7 - linux devices
ok 8 - linux process
ok 9 - masked paths
ok 10 - oom score adj
ok 11 - read only paths
ok 12 - rlimits
ok 13 - sysctls
ok 14 - uid mappings
ok 15 - gid mappings
1..15
--- PASS: TestValidateBasic (0.08s)
=== RUN TestValidateSysctls
TAP version 13
ok 1 - root filesystem
ok 2 - hostname
ok 3 - mounts
ok 4 - capabilities
ok 5 - default symlinks
ok 6 - default devices
ok 7 - linux devices
ok 8 - linux process
ok 9 - masked paths
ok 10 - oom score adj
ok 11 - read only paths
ok 12 - rlimits
ok 13 - sysctls
ok 14 - uid mappings
ok 15 - gid mappings
1..15
--- PASS: TestValidateSysctls (0.20s)
PASS
ok github.com/opencontainers/runtime-tools/validation 0.281s
```

[bundle]: https://github.com/opencontainers/runtime-spec/blob/master/bundle.md
[config.json]: https://github.com/opencontainers/runtime-spec/blob/master/config.md
[runC]: https://github.com/opencontainers/runc
[runtime-spec]: https://github.com/opencontainers/runtime-spec

[generate.1]: man/oci-runtime-tool-generate.1.md
[validate.1]: man/oci-runtime-tool-validate.1.md
vendor/github.com/pkg/errors/.gitignore (generated, vendored; 24 lines removed)
@@ -1,24 +0,0 @@
# Compiled Object files, Static and Dynamic libs (Shared Objects)
*.o
*.a
*.so

# Folders
_obj
_test

# Architecture specific extensions/prefixes
*.[568vq]
[568vq].out

*.cgo1.go
*.cgo2.c
_cgo_defun.c
_cgo_gotypes.go
_cgo_export.*

_testmain.go

*.exe
*.test
*.prof
vendor/github.com/pkg/errors/.travis.yml (generated, vendored; 11 lines removed)
@@ -1,11 +0,0 @@
language: go
go_import_path: github.com/pkg/errors
go:
- 1.4.3
- 1.5.4
- 1.6.2
- 1.7.1
- tip

script:
- go test -v ./...
vendor/github.com/pkg/errors/appveyor.yml (generated, vendored; 32 lines removed)
@@ -1,32 +0,0 @@
version: build-{build}.{branch}

clone_folder: C:\gopath\src\github.com\pkg\errors
shallow_clone: true # for startup speed

environment:
  GOPATH: C:\gopath

platform:
  - x64

# http://www.appveyor.com/docs/installed-software
install:
  # some helpful output for debugging builds
  - go version
  - go env
  # pre-installed MinGW at C:\MinGW is 32bit only
  # but MSYS2 at C:\msys64 has mingw64
  - set PATH=C:\msys64\mingw64\bin;%PATH%
  - gcc --version
  - g++ --version

build_script:
  - go install -v ./...

test_script:
  - set PATH=C:\gopath\bin;%PATH%
  - go test -v ./...

#artifacts:
# - path: '%GOPATH%\bin\*.exe'
deploy: off
vendor/github.com/pmezard/go-difflib/README.md (generated, vendored, new file; 50 lines)
@@ -0,0 +1,50 @@
go-difflib
==========

[](https://travis-ci.org/pmezard/go-difflib)
[](https://godoc.org/github.com/pmezard/go-difflib/difflib)

Go-difflib is a partial port of python 3 difflib package. Its main goal
was to make unified and context diff available in pure Go, mostly for
testing purposes.

The following class and functions (and related tests) have been ported:

* `SequenceMatcher`
* `unified_diff()`
* `context_diff()`

## Installation

```bash
$ go get github.com/pmezard/go-difflib/difflib
```

### Quick Start

Diffs are configured with Unified (or ContextDiff) structures, and can
be output to an io.Writer or returned as a string.

```Go
diff := UnifiedDiff{
    A:        difflib.SplitLines("foo\nbar\n"),
    B:        difflib.SplitLines("foo\nbaz\n"),
    FromFile: "Original",
    ToFile:   "Current",
    Context:  3,
}
text, _ := GetUnifiedDiffString(diff)
fmt.Printf(text)
```

would output:

```
--- Original
+++ Current
@@ -1,3 +1,3 @@
foo
-bar
+baz
```
vendor/github.com/sirupsen/logrus/.gitignore (generated, vendored; 1 line removed)
@@ -1 +0,0 @@
logrus
vendor/github.com/sirupsen/logrus/.travis.yml (generated, vendored; 13 lines removed)
@@ -1,13 +0,0 @@
language: go
go:
- 1.6.x
- 1.7.x
- 1.8.x
- tip
env:
- GOMAXPROCS=4 GORACE=halt_on_error=1
install:
- go get github.com/stretchr/testify/assert
script:
- go test -race -v .
- cd hooks/null && go test -race -v .
Some files were not shown because too many files have changed in this diff.