Merge pull request #1473 from thaJeztah/bump_containerd

vendor: update containerd and dependencies to ed261720c8
This commit is contained in:
Akihiro Suda 2020-05-10 09:45:57 +09:00 committed by GitHub
commit 34e6985f78
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
179 changed files with 6735 additions and 1514 deletions

View File

@ -1,5 +1,5 @@
# cri dependencies # cri dependencies
github.com/docker/distribution 0d3efadf0154c2b8a4e7b6621fff9809655cc580 github.com/docker/distribution 2461543d988979529609e8cb6fca9ca190dc48da # v2.7.1
github.com/docker/docker 4634ce647cf2ce2c6031129ccd109e557244986f github.com/docker/docker 4634ce647cf2ce2c6031129ccd109e557244986f
github.com/opencontainers/selinux 0d49ba2a6aae052c614dfe5de62a158711a6c461 # v1.5.1 github.com/opencontainers/selinux 0d49ba2a6aae052c614dfe5de62a158711a6c461 # v1.5.1
github.com/tchap/go-patricia 666120de432aea38ab06bd5c818f04f4129882c9 # v2.2.6 github.com/tchap/go-patricia 666120de432aea38ab06bd5c818f04f4129882c9 # v2.2.6
@ -8,17 +8,17 @@ github.com/tchap/go-patricia 666120de432aea38ab06bd5c818f
github.com/beorn7/perks 37c8de3658fcb183f997c4e13e8337516ab753e6 # v1.0.1 github.com/beorn7/perks 37c8de3658fcb183f997c4e13e8337516ab753e6 # v1.0.1
github.com/BurntSushi/toml 3012a1dbe2e4bd1391d42b32f0577cb7bbc7f005 # v0.3.1 github.com/BurntSushi/toml 3012a1dbe2e4bd1391d42b32f0577cb7bbc7f005 # v0.3.1
github.com/cespare/xxhash/v2 d7df74196a9e781ede915320c11c378c1b2f3a1f # v2.1.1 github.com/cespare/xxhash/v2 d7df74196a9e781ede915320c11c378c1b2f3a1f # v2.1.1
github.com/containerd/cgroups 7347743e5d1e8500d9f27c8e748e689ed991d92b github.com/containerd/cgroups b4448137398923af7f4918b8b2ad8249172ca7a6
github.com/containerd/console 8375c3424e4d7b114e8a90a4a40c8e1b40d1d4e6 # v1.0.0 github.com/containerd/console 8375c3424e4d7b114e8a90a4a40c8e1b40d1d4e6 # v1.0.0
github.com/containerd/containerd 01310155947cb6eec37dcae29742a165e56acb4a github.com/containerd/containerd ed261720c86d1e700cd5d39175128322baac6dda
github.com/containerd/continuity 0ec596719c75bfd42908850990acea594b7593ac github.com/containerd/continuity 0ec596719c75bfd42908850990acea594b7593ac
github.com/containerd/fifo bda0ff6ed73c67bfb5e62bc9c697f146b7fd7f13 github.com/containerd/fifo bda0ff6ed73c67bfb5e62bc9c697f146b7fd7f13
github.com/containerd/go-runc a5c2862aed5e6358b305b0e16bfce58e0549b1cd github.com/containerd/go-runc a5c2862aed5e6358b305b0e16bfce58e0549b1cd
github.com/containerd/ttrpc 92c8520ef9f86600c650dd540266a007bf03670f # v1.0.0 github.com/containerd/ttrpc 72bb1b21c5b0a4a107f59dd85f6ab58e564b68d6 # v1.0.1
github.com/containerd/typeurl a93fcdb778cd272c6e9b3028b2f42d813e785d40 # v1.0.0 github.com/containerd/typeurl cd3ce7159eae562a4f60ceff37dada11a939d247 # v1.0.1
github.com/coreos/go-systemd/v22 2d78030078ef61b3cae27f42ad6d0e46db51b339 # v22.0.0 github.com/coreos/go-systemd/v22 2d78030078ef61b3cae27f42ad6d0e46db51b339 # v22.0.0
github.com/cpuguy83/go-md2man 7762f7e404f8416dfa1d9bb6a8c192aa9acb4d19 # v1.0.10 github.com/cpuguy83/go-md2man 7762f7e404f8416dfa1d9bb6a8c192aa9acb4d19 # v1.0.10
github.com/docker/go-events 9461782956ad83b30282bf90e31fa6a70c255ba9 github.com/docker/go-events e31b211e4f1cd09aa76fe4ac244571fab96ae47f
github.com/docker/go-metrics b619b3592b65de4f087d9f16863a7e6ff905973c # v0.0.1 github.com/docker/go-metrics b619b3592b65de4f087d9f16863a7e6ff905973c # v0.0.1
github.com/docker/go-units 519db1ee28dcc9fd2474ae59fca29a810482bfb1 # v0.4.0 github.com/docker/go-units 519db1ee28dcc9fd2474ae59fca29a810482bfb1 # v0.4.0
github.com/godbus/dbus/v5 37bf87eef99d69c4f1d3528bd66e3a87dc201472 # v5.0.3 github.com/godbus/dbus/v5 37bf87eef99d69c4f1d3528bd66e3a87dc201472 # v5.0.3
@ -27,23 +27,25 @@ github.com/gogo/protobuf 5628607bb4c51c3157aacc3a50f0
github.com/golang/protobuf d23c5127dc24889085f8ccea5c9d560a57a879d8 # v1.3.3 github.com/golang/protobuf d23c5127dc24889085f8ccea5c9d560a57a879d8 # v1.3.3
github.com/google/uuid 0cd6bf5da1e1c83f8b45653022c74f71af0538a4 # v1.1.1 github.com/google/uuid 0cd6bf5da1e1c83f8b45653022c74f71af0538a4 # v1.1.1
github.com/grpc-ecosystem/go-grpc-prometheus c225b8c3b01faf2899099b768856a9e916e5087b # v1.2.0 github.com/grpc-ecosystem/go-grpc-prometheus c225b8c3b01faf2899099b768856a9e916e5087b # v1.2.0
github.com/hashicorp/errwrap 8a6fb523712970c966eefc6b39ed2c5e74880354 # v1.0.0
github.com/hashicorp/go-multierror 886a7fbe3eb1c874d46f623bfa70af45f425b3d1 # v1.0.0
github.com/hashicorp/golang-lru 7f827b33c0f158ec5dfbba01bb0b14a4541fd81d # v0.5.3 github.com/hashicorp/golang-lru 7f827b33c0f158ec5dfbba01bb0b14a4541fd81d # v0.5.3
github.com/imdario/mergo 7c29201646fa3de8506f701213473dd407f19646 # v0.3.7 github.com/imdario/mergo 7c29201646fa3de8506f701213473dd407f19646 # v0.3.7
github.com/konsorten/go-windows-terminal-sequences 5c8c8bd35d3832f5d134ae1e1e375b69a4d25242 # v1.0.1 github.com/konsorten/go-windows-terminal-sequences edb144dfd453055e1e49a3d8b410a660b5a87613 # v1.0.3
github.com/matttproud/golang_protobuf_extensions c12348ce28de40eed0136aa2b644d0ee0650e56c # v1.0.1 github.com/matttproud/golang_protobuf_extensions c12348ce28de40eed0136aa2b644d0ee0650e56c # v1.0.1
github.com/Microsoft/go-winio 6c72808b55902eae4c5943626030429ff20f3b63 # v0.4.14 github.com/Microsoft/go-winio 6c72808b55902eae4c5943626030429ff20f3b63 # v0.4.14
github.com/Microsoft/hcsshim 0b571ac85d7c5842b26d2571de4868634a4c39d7 # v0.8.7-24-g0b571ac8 github.com/Microsoft/hcsshim 5bc557dd210ff2caf615e6e22d398123de77fc11 # v0.8.9
github.com/opencontainers/go-digest c9281466c8b2f606084ac71339773efd177436e7 github.com/opencontainers/go-digest c9281466c8b2f606084ac71339773efd177436e7
github.com/opencontainers/image-spec d60099175f88c47cd379c4738d158884749ed235 # v1.0.1 github.com/opencontainers/image-spec d60099175f88c47cd379c4738d158884749ed235 # v1.0.1
github.com/opencontainers/runc dc9208a3303feef5b3839f4323d9beb36df0a9dd # v1.0.0-rc10 github.com/opencontainers/runc dc9208a3303feef5b3839f4323d9beb36df0a9dd # v1.0.0-rc10
github.com/opencontainers/runtime-spec 29686dbc5559d93fb1ef402eeda3e35c38d75af4 # v1.0.1-59-g29686db github.com/opencontainers/runtime-spec c4ee7d12c742ffe806cd9350b6af3b4b19faed6f # v1.0.2
github.com/pkg/errors ba968bfe8b2f7e042a574c888954fccecfa385b4 # v0.8.1 github.com/pkg/errors 614d223910a179a466c1767a985424175c39b465 # v0.9.1
github.com/prometheus/client_golang c42bebe5a5cddfc6b28cd639103369d8a75dfa89 # v1.3.0 github.com/prometheus/client_golang c42bebe5a5cddfc6b28cd639103369d8a75dfa89 # v1.3.0
github.com/prometheus/client_model d1d2010b5beead3fa1c5f271a5cf626e40b3ad6e # v0.1.0 github.com/prometheus/client_model d1d2010b5beead3fa1c5f271a5cf626e40b3ad6e # v0.1.0
github.com/prometheus/common 287d3e634a1e550c9e463dd7e5a75a422c614505 # v0.7.0 github.com/prometheus/common 287d3e634a1e550c9e463dd7e5a75a422c614505 # v0.7.0
github.com/prometheus/procfs 6d489fc7f1d9cd890a250f3ea3431b1744b9623f # v0.0.8 github.com/prometheus/procfs 6d489fc7f1d9cd890a250f3ea3431b1744b9623f # v0.0.8
github.com/russross/blackfriday 05f3235734ad95d0016f6a23902f06461fcf567a # v1.5.2 github.com/russross/blackfriday 05f3235734ad95d0016f6a23902f06461fcf567a # v1.5.2
github.com/sirupsen/logrus 8bdbc7bcc01dcbb8ec23dc8a28e332258d25251f # v1.4.1 github.com/sirupsen/logrus 60c74ad9be0d874af0ab0daef6ab07c5c5911f0d # v1.6.0
github.com/syndtr/gocapability d98352740cb2c55f81556b63d4a1ec64c5a319c2 github.com/syndtr/gocapability d98352740cb2c55f81556b63d4a1ec64c5a319c2
github.com/urfave/cli bfe2e925cfb6d44b40ad3a779165ea7e8aff9212 # v1.22.0 github.com/urfave/cli bfe2e925cfb6d44b40ad3a779165ea7e8aff9212 # v1.22.0
go.etcd.io/bbolt a0458a2b35708eef59eb5f620ceb3cd1c01a824d # v1.3.3 go.etcd.io/bbolt a0458a2b35708eef59eb5f620ceb3cd1c01a824d # v1.3.3
@ -56,7 +58,7 @@ google.golang.org/genproto e50cd9704f63023d62cd06a1994b
google.golang.org/grpc f495f5b15ae7ccda3b38c53a1bfcde4c1a58a2bc # v1.27.1 google.golang.org/grpc f495f5b15ae7ccda3b38c53a1bfcde4c1a58a2bc # v1.27.1
# cgroups dependencies # cgroups dependencies
github.com/cilium/ebpf 60c3aa43f488292fe2ee50fb8b833b383ca8ebbb github.com/cilium/ebpf 4032b1d8aae306b7bb94a2a11002932caf88c644
# kubernetes dependencies # kubernetes dependencies
github.com/davecgh/go-spew 8991bc29aa16c548c550c7ff78260e27b9ab7c73 # v1.1.1 github.com/davecgh/go-spew 8991bc29aa16c548c550c7ff78260e27b9ab7c73 # v1.1.1

View File

@ -16,6 +16,11 @@ When you submit a pull request, a CLA-bot will automatically determine whether y
a CLA and decorate the PR appropriately (e.g., label, comment). Simply follow the instructions a CLA and decorate the PR appropriately (e.g., label, comment). Simply follow the instructions
provided by the bot. You will only need to do this once across all repos using our CLA. provided by the bot. You will only need to do this once across all repos using our CLA.
We also ask that contributors [sign their commits](https://git-scm.com/docs/git-commit) using `git commit -s` or `git commit --signoff` to certify they either authored the work themselves or otherwise have permission to use it in this project.
## Code of Conduct
This project has adopted the [Microsoft Open Source Code of Conduct](https://opensource.microsoft.com/codeofconduct/). This project has adopted the [Microsoft Open Source Code of Conduct](https://opensource.microsoft.com/codeofconduct/).
For more information see the [Code of Conduct FAQ](https://opensource.microsoft.com/codeofconduct/faq/) or For more information see the [Code of Conduct FAQ](https://opensource.microsoft.com/codeofconduct/faq/) or
contact [opencode@microsoft.com](mailto:opencode@microsoft.com) with any additional questions or comments. contact [opencode@microsoft.com](mailto:opencode@microsoft.com) with any additional questions or comments.

View File

@ -112,7 +112,10 @@ type Options struct {
// hypervisor isolated utility vm. // hypervisor isolated utility vm.
// //
// The platform default is 1024MB if omitted. // The platform default is 1024MB if omitted.
VmMemorySizeInMb int32 `protobuf:"varint,9,opt,name=vm_memory_size_in_mb,json=vmMemorySizeInMb,proto3" json:"vm_memory_size_in_mb,omitempty"` VmMemorySizeInMb int32 `protobuf:"varint,9,opt,name=vm_memory_size_in_mb,json=vmMemorySizeInMb,proto3" json:"vm_memory_size_in_mb,omitempty"`
// GPUVHDPath is the path to the gpu vhd to add to the uvm
// when a container requests a gpu
GPUVHDPath string `protobuf:"bytes,10,opt,name=GPUVHDPath,json=gPUVHDPath,proto3" json:"GPUVHDPath,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"` XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"` XXX_sizecache int32 `json:"-"`
@ -211,55 +214,56 @@ func init() {
} }
var fileDescriptor_b643df6839c75082 = []byte{ var fileDescriptor_b643df6839c75082 = []byte{
// 760 bytes of a gzipped FileDescriptorProto // 777 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xa4, 0x54, 0xcb, 0x6e, 0xdb, 0x46, 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xa4, 0x54, 0xcd, 0x6f, 0xdb, 0x36,
0x14, 0x15, 0x63, 0xbd, 0x78, 0x53, 0x3b, 0xf4, 0x54, 0x0b, 0xc2, 0x6d, 0x25, 0xc1, 0x59, 0xc4, 0x1c, 0xb5, 0x9a, 0xf8, 0x43, 0xbf, 0x2e, 0xa9, 0xc2, 0xf9, 0x20, 0x64, 0x9b, 0x6d, 0xa4, 0x87,
0x41, 0x63, 0xd2, 0x4e, 0x97, 0x5d, 0x55, 0x96, 0x8c, 0xb2, 0xa8, 0x6d, 0x82, 0x32, 0x9a, 0x3e, 0xa6, 0x58, 0x23, 0x25, 0xdd, 0x71, 0xa7, 0x39, 0x76, 0x56, 0x0d, 0x4b, 0x22, 0xc8, 0x59, 0xbb,
0x16, 0x03, 0x3e, 0xc6, 0xd4, 0x20, 0x1a, 0x0e, 0x31, 0x33, 0x52, 0xad, 0xac, 0xfa, 0x09, 0xfd, 0x8f, 0x03, 0xa1, 0x0f, 0x46, 0x26, 0x6a, 0x8a, 0x02, 0x49, 0x7b, 0x71, 0x4f, 0xfb, 0x13, 0xf6,
0x88, 0x7e, 0x8c, 0x97, 0x5d, 0x16, 0x28, 0xe0, 0x36, 0xfa, 0x92, 0x62, 0x86, 0xa4, 0x83, 0x06, 0x47, 0xed, 0x90, 0xe3, 0x8e, 0x03, 0x06, 0x64, 0xab, 0xff, 0x92, 0x81, 0x94, 0x94, 0x62, 0x45,
0x41, 0x37, 0x5d, 0x69, 0x78, 0xce, 0x99, 0x73, 0x1f, 0x73, 0x20, 0xb8, 0xca, 0xa9, 0x5a, 0xac, 0xb1, 0xcb, 0x4e, 0xa6, 0xde, 0x7b, 0x7c, 0xbf, 0x0f, 0x3e, 0x18, 0x2e, 0x73, 0xaa, 0xe6, 0xcb,
0x12, 0x2f, 0xe5, 0xcc, 0xbf, 0xa0, 0xa9, 0xe0, 0x92, 0xdf, 0x28, 0x7f, 0x91, 0x4a, 0xb9, 0xa0, 0xc4, 0x4b, 0x39, 0xf3, 0xcf, 0x69, 0x2a, 0xb8, 0xe4, 0xd7, 0xca, 0x9f, 0xa7, 0x52, 0xce, 0x29,
0xcc, 0x4f, 0x59, 0xe6, 0xa7, 0xbc, 0x50, 0x31, 0x2d, 0x88, 0xc8, 0x8e, 0x35, 0x76, 0x2c, 0x56, 0xf3, 0x53, 0x96, 0xf9, 0x29, 0x2f, 0x54, 0x4c, 0x0b, 0x22, 0xb2, 0x23, 0x8d, 0x1d, 0x89, 0x65,
0xc5, 0x22, 0x95, 0xc7, 0xeb, 0x53, 0x9f, 0x97, 0x8a, 0xf2, 0x42, 0xfa, 0x15, 0xe2, 0x95, 0x82, 0x31, 0x4f, 0xe5, 0xd1, 0xea, 0xc4, 0xe7, 0xa5, 0xa2, 0xbc, 0x90, 0x7e, 0x85, 0x78, 0xa5, 0xe0,
0x2b, 0x8e, 0x06, 0xef, 0xf4, 0x5e, 0x4d, 0xac, 0x4f, 0x0f, 0x06, 0x39, 0xcf, 0xb9, 0x11, 0xf8, 0x8a, 0xa3, 0xfe, 0x3b, 0xbd, 0x57, 0x13, 0xab, 0x93, 0xfd, 0x7e, 0xce, 0x73, 0x6e, 0x04, 0xbe,
0xfa, 0x54, 0x69, 0x0f, 0x46, 0x39, 0xe7, 0xf9, 0x92, 0xf8, 0xe6, 0x2b, 0x59, 0xdd, 0xf8, 0x8a, 0x3e, 0x55, 0xda, 0xfd, 0x61, 0xce, 0x79, 0xbe, 0x20, 0xbe, 0xf9, 0x4a, 0x96, 0xd7, 0xbe, 0xa2,
0x32, 0x22, 0x55, 0xcc, 0xca, 0x4a, 0x70, 0xf8, 0x5b, 0x1b, 0x7a, 0x57, 0x55, 0x15, 0x34, 0x80, 0x8c, 0x48, 0x15, 0xb3, 0xb2, 0x12, 0x1c, 0xfc, 0xb6, 0x0d, 0xdd, 0xcb, 0xaa, 0x0a, 0xea, 0x43,
0x4e, 0x46, 0x92, 0x55, 0xee, 0x5a, 0x63, 0xeb, 0xa8, 0x1f, 0x55, 0x1f, 0xe8, 0x1c, 0xc0, 0x1c, 0x3b, 0x23, 0xc9, 0x32, 0x77, 0xad, 0x91, 0x75, 0xd8, 0x8b, 0xaa, 0x0f, 0x74, 0x06, 0x60, 0x0e,
0xb0, 0xda, 0x94, 0xc4, 0x7d, 0x34, 0xb6, 0x8e, 0xf6, 0x5e, 0x3e, 0xf3, 0x3e, 0xd4, 0x83, 0x57, 0x58, 0xad, 0x4b, 0xe2, 0x3e, 0x18, 0x59, 0x87, 0xbb, 0xcf, 0x9f, 0x78, 0x1f, 0xea, 0xc1, 0xab,
0x1b, 0x79, 0x53, 0xad, 0xbf, 0xde, 0x94, 0x24, 0xb2, 0xb3, 0xe6, 0x88, 0x9e, 0xc2, 0xae, 0x20, 0x8d, 0xbc, 0x89, 0xd6, 0x5f, 0xad, 0x4b, 0x12, 0xd9, 0x59, 0x73, 0x44, 0x8f, 0x61, 0x47, 0x90,
0x39, 0x95, 0x4a, 0x6c, 0xb0, 0xe0, 0x5c, 0xb9, 0x3b, 0x63, 0xeb, 0xc8, 0x8e, 0x3e, 0x6a, 0xc0, 0x9c, 0x4a, 0x25, 0xd6, 0x58, 0x70, 0xae, 0xdc, 0xad, 0x91, 0x75, 0x68, 0x47, 0x1f, 0x35, 0x60,
0x88, 0x73, 0xa5, 0x45, 0x32, 0x2e, 0xb2, 0x84, 0xdf, 0x62, 0xca, 0xe2, 0x9c, 0xb8, 0xed, 0x4a, 0xc4, 0xb9, 0xd2, 0x22, 0x19, 0x17, 0x59, 0xc2, 0x6f, 0x30, 0x65, 0x71, 0x4e, 0xdc, 0xed, 0x4a,
0x54, 0x83, 0x81, 0xc6, 0xd0, 0x73, 0x70, 0x1a, 0x51, 0xb9, 0x8c, 0xd5, 0x0d, 0x17, 0xcc, 0xed, 0x54, 0x83, 0x81, 0xc6, 0xd0, 0x53, 0x70, 0x1a, 0x51, 0xb9, 0x88, 0xd5, 0x35, 0x17, 0xcc, 0x6d,
0x18, 0xdd, 0x93, 0x1a, 0x0f, 0x6b, 0x18, 0xfd, 0x04, 0xfb, 0x0f, 0x7e, 0x92, 0x2f, 0x63, 0xdd, 0x1b, 0xdd, 0xa3, 0x1a, 0x0f, 0x6b, 0x18, 0xfd, 0x04, 0x7b, 0xf7, 0x7e, 0x92, 0x2f, 0x62, 0xdd,
0x9f, 0xdb, 0x35, 0x33, 0x78, 0xff, 0x3d, 0xc3, 0xbc, 0xae, 0xd8, 0xdc, 0x8a, 0x9a, 0x9a, 0x0f, 0x9f, 0xdb, 0x31, 0x33, 0x78, 0xff, 0x3d, 0xc3, 0xac, 0xae, 0xd8, 0xdc, 0x8a, 0x9a, 0x9a, 0xf7,
0x08, 0xf2, 0x61, 0x90, 0x70, 0xae, 0xf0, 0x0d, 0x5d, 0x12, 0x69, 0x66, 0xc2, 0x65, 0xac, 0x16, 0x08, 0xf2, 0xa1, 0x9f, 0x70, 0xae, 0xf0, 0x35, 0x5d, 0x10, 0x69, 0x66, 0xc2, 0x65, 0xac, 0xe6,
0x6e, 0xcf, 0xf4, 0xb2, 0xaf, 0xb9, 0x73, 0x4d, 0xe9, 0xc9, 0xc2, 0x58, 0x2d, 0xd0, 0x0b, 0x40, 0x6e, 0xd7, 0xf4, 0xb2, 0xa7, 0xb9, 0x33, 0x4d, 0xe9, 0xc9, 0xc2, 0x58, 0xcd, 0xd1, 0x33, 0x40,
0x6b, 0x86, 0x4b, 0xc1, 0x53, 0x22, 0x25, 0x17, 0x38, 0xe5, 0xab, 0x42, 0xb9, 0xfd, 0xb1, 0x75, 0x2b, 0x86, 0x4b, 0xc1, 0x53, 0x22, 0x25, 0x17, 0x38, 0xe5, 0xcb, 0x42, 0xb9, 0xbd, 0x91, 0x75,
0xd4, 0x89, 0x9c, 0x35, 0x0b, 0x1b, 0xe2, 0x4c, 0xe3, 0xc8, 0x83, 0xc1, 0x9a, 0x61, 0x46, 0x18, 0xd8, 0x8e, 0x9c, 0x15, 0x0b, 0x1b, 0xe2, 0x54, 0xe3, 0xc8, 0x83, 0xfe, 0x8a, 0x61, 0x46, 0x18,
0x17, 0x1b, 0x2c, 0xe9, 0x1b, 0x82, 0x69, 0x81, 0x59, 0xe2, 0xda, 0x8d, 0xfe, 0xc2, 0x50, 0x73, 0x17, 0x6b, 0x2c, 0xe9, 0x1b, 0x82, 0x69, 0x81, 0x59, 0xe2, 0xda, 0x8d, 0xfe, 0xdc, 0x50, 0x33,
0xfa, 0x86, 0x04, 0xc5, 0x45, 0x72, 0xf8, 0x1c, 0xec, 0x87, 0xc5, 0x23, 0x1b, 0x3a, 0x97, 0x61, 0xfa, 0x86, 0x04, 0xc5, 0x79, 0x82, 0x06, 0x00, 0x5f, 0x87, 0xdf, 0xbd, 0x7c, 0x31, 0xd1, 0xb5,
0x10, 0xce, 0x9c, 0x16, 0xea, 0x43, 0xfb, 0x3c, 0xf8, 0x76, 0xe6, 0x58, 0xa8, 0x07, 0x3b, 0xb3, 0x5c, 0x30, 0x4d, 0x40, 0x7e, 0x8f, 0x1c, 0x3c, 0x05, 0xfb, 0xfe, 0x61, 0x90, 0x0d, 0xed, 0x8b,
0xeb, 0x57, 0xce, 0xa3, 0x43, 0x1f, 0x9c, 0xf7, 0xe7, 0x43, 0x8f, 0xa1, 0x17, 0x46, 0x57, 0x67, 0x30, 0x08, 0xa7, 0x4e, 0x0b, 0xf5, 0x60, 0xfb, 0x2c, 0xf8, 0x76, 0xea, 0x58, 0xa8, 0x0b, 0x5b,
0xb3, 0xf9, 0xdc, 0x69, 0xa1, 0x3d, 0x80, 0xaf, 0x7f, 0x08, 0x67, 0xd1, 0x77, 0xc1, 0xfc, 0x2a, 0xd3, 0xab, 0x57, 0xce, 0x83, 0x03, 0x1f, 0x9c, 0xf7, 0xe7, 0x47, 0x0f, 0xa1, 0x1b, 0x46, 0x97,
0x72, 0xac, 0xc3, 0x3f, 0x77, 0x60, 0xaf, 0x6e, 0x6f, 0x4a, 0x54, 0x4c, 0x97, 0x12, 0x7d, 0x06, 0xa7, 0xd3, 0xd9, 0xcc, 0x69, 0xa1, 0x5d, 0x80, 0x17, 0x3f, 0x84, 0xd3, 0xe8, 0x65, 0x30, 0xbb,
0x60, 0x9e, 0x08, 0x17, 0x31, 0x23, 0x26, 0x32, 0x76, 0x64, 0x1b, 0xe4, 0x32, 0x66, 0x04, 0x9d, 0x8c, 0x1c, 0xeb, 0xe0, 0xcf, 0x2d, 0xd8, 0xad, 0xdb, 0x9f, 0x10, 0x15, 0xd3, 0x85, 0x44, 0x9f,
0x01, 0xa4, 0x82, 0xc4, 0x8a, 0x64, 0x38, 0x56, 0x26, 0x36, 0x8f, 0x5f, 0x1e, 0x78, 0x55, 0x1c, 0x01, 0x98, 0x27, 0xc4, 0x45, 0xcc, 0x88, 0x89, 0x94, 0x1d, 0xd9, 0x06, 0xb9, 0x88, 0x19, 0x41,
0xbd, 0x26, 0x8e, 0xde, 0x75, 0x13, 0xc7, 0x49, 0xff, 0xee, 0x7e, 0xd4, 0xfa, 0xf5, 0xaf, 0x91, 0xa7, 0x00, 0xa9, 0x20, 0xb1, 0x22, 0x19, 0x8e, 0x95, 0x89, 0xd5, 0xc3, 0xe7, 0xfb, 0x5e, 0x15,
0x15, 0xd9, 0xf5, 0xbd, 0xaf, 0x14, 0xfa, 0x1c, 0xd0, 0x6b, 0x22, 0x0a, 0xb2, 0xc4, 0x3a, 0xb7, 0x57, 0xaf, 0x89, 0xab, 0x77, 0xd5, 0xc4, 0x75, 0xdc, 0xbb, 0xbd, 0x1b, 0xb6, 0x7e, 0xfd, 0x6b,
0xf8, 0xf4, 0xe4, 0x04, 0x17, 0xd2, 0x04, 0xa7, 0x1d, 0x3d, 0xa9, 0x18, 0xed, 0x70, 0x7a, 0x72, 0x68, 0x45, 0x76, 0x7d, 0xef, 0x2b, 0x85, 0x3e, 0x07, 0xf4, 0x9a, 0x88, 0x82, 0x2c, 0xb0, 0xce,
0x72, 0x29, 0x91, 0x07, 0x1f, 0xd7, 0xcb, 0x4a, 0x39, 0x63, 0x54, 0xe1, 0x64, 0xa3, 0x88, 0x34, 0x35, 0x3e, 0x39, 0x3e, 0xc6, 0x85, 0x34, 0xc1, 0xda, 0x8e, 0x1e, 0x55, 0x8c, 0x76, 0x38, 0x39,
0x09, 0x6a, 0x47, 0xfb, 0x15, 0x75, 0x66, 0x98, 0x89, 0x26, 0xd0, 0x39, 0x8c, 0x6b, 0xfd, 0xcf, 0x3e, 0xbe, 0x90, 0xc8, 0x83, 0x8f, 0xeb, 0x65, 0xa6, 0x9c, 0x31, 0xaa, 0x70, 0xb2, 0x56, 0x44,
0x5c, 0xbc, 0xa6, 0x45, 0x8e, 0x25, 0x51, 0xb8, 0x14, 0x74, 0x1d, 0x2b, 0x52, 0x5f, 0xee, 0x98, 0x9a, 0x84, 0x6d, 0x47, 0x7b, 0x15, 0x75, 0x6a, 0x98, 0xb1, 0x26, 0xd0, 0x19, 0x8c, 0x6a, 0xfd,
0xcb, 0x9f, 0x56, 0xba, 0x57, 0x95, 0x6c, 0x4e, 0x54, 0x58, 0x89, 0x2a, 0x9f, 0x29, 0x8c, 0x3e, 0xcf, 0x5c, 0xbc, 0xa6, 0x45, 0x8e, 0x25, 0x51, 0xb8, 0x14, 0x74, 0x15, 0x2b, 0x52, 0x5f, 0x6e,
0xe0, 0x23, 0x17, 0xb1, 0x20, 0x59, 0x6d, 0xd3, 0x35, 0x36, 0x9f, 0xbc, 0x6f, 0x33, 0x37, 0x9a, 0x9b, 0xcb, 0x9f, 0x56, 0xba, 0x57, 0x95, 0x6c, 0x46, 0x54, 0x58, 0x89, 0x2a, 0x9f, 0x09, 0x0c,
0xca, 0xe5, 0x05, 0x40, 0x1d, 0x0c, 0x4c, 0x33, 0x13, 0xa1, 0xdd, 0xc9, 0xee, 0xf6, 0x7e, 0x64, 0x3f, 0xe0, 0x23, 0xe7, 0xb1, 0x20, 0x59, 0x6d, 0xd3, 0x31, 0x36, 0x9f, 0xbc, 0x6f, 0x33, 0x33,
0xd7, 0x6b, 0x0f, 0xa6, 0x91, 0x5d, 0x0b, 0x82, 0x0c, 0x3d, 0x03, 0x67, 0x25, 0x89, 0xf8, 0xd7, 0x9a, 0xca, 0xe5, 0x19, 0x40, 0x1d, 0x1c, 0x4c, 0x33, 0x13, 0xb1, 0x9d, 0xf1, 0xce, 0xe6, 0x6e,
0x5a, 0xfa, 0xa6, 0xc8, 0xae, 0xc6, 0xdf, 0x2d, 0xe5, 0x29, 0xf4, 0xc8, 0x2d, 0x49, 0xb5, 0xa7, 0x68, 0xd7, 0x6b, 0x0f, 0x26, 0x91, 0x5d, 0x0b, 0x82, 0x0c, 0x3d, 0x01, 0x67, 0x29, 0x89, 0xf8,
0xce, 0x8d, 0x3d, 0x81, 0xed, 0xfd, 0xa8, 0x3b, 0xbb, 0x25, 0x69, 0x30, 0x8d, 0xba, 0x9a, 0x0a, 0xd7, 0x5a, 0x7a, 0xa6, 0xc8, 0x8e, 0xc6, 0xdf, 0x2d, 0xe5, 0x31, 0x74, 0xc9, 0x0d, 0x49, 0xb5,
0xb2, 0x49, 0x76, 0xf7, 0x76, 0xd8, 0xfa, 0xe3, 0xed, 0xb0, 0xf5, 0xcb, 0x76, 0x68, 0xdd, 0x6d, 0xa7, 0xce, 0x95, 0x3d, 0x86, 0xcd, 0xdd, 0xb0, 0x33, 0xbd, 0x21, 0x69, 0x30, 0x89, 0x3a, 0x9a,
0x87, 0xd6, 0xef, 0xdb, 0xa1, 0xf5, 0xf7, 0x76, 0x68, 0xfd, 0xf8, 0xcd, 0xff, 0xff, 0xf3, 0xfa, 0x0a, 0xb2, 0x71, 0x76, 0xfb, 0x76, 0xd0, 0xfa, 0xe3, 0xed, 0xa0, 0xf5, 0xcb, 0x66, 0x60, 0xdd,
0xb2, 0xfe, 0xfd, 0xbe, 0x95, 0x74, 0xcd, 0xbb, 0x7f, 0xf1, 0x4f, 0x00, 0x00, 0x00, 0xff, 0xff, 0x6e, 0x06, 0xd6, 0xef, 0x9b, 0x81, 0xf5, 0xf7, 0x66, 0x60, 0xfd, 0xf8, 0xcd, 0xff, 0xff, 0x73,
0xd7, 0x90, 0x00, 0xaf, 0x13, 0x05, 0x00, 0x00, 0xfb, 0xb2, 0xfe, 0xfd, 0xbe, 0x95, 0x74, 0xcc, 0xbb, 0x7f, 0xf1, 0x4f, 0x00, 0x00, 0x00, 0xff,
0xff, 0xc9, 0xeb, 0xae, 0x6f, 0x33, 0x05, 0x00, 0x00,
} }
func (m *Options) Marshal() (dAtA []byte, err error) { func (m *Options) Marshal() (dAtA []byte, err error) {
@ -331,6 +335,12 @@ func (m *Options) MarshalTo(dAtA []byte) (int, error) {
i++ i++
i = encodeVarintRunhcs(dAtA, i, uint64(m.VmMemorySizeInMb)) i = encodeVarintRunhcs(dAtA, i, uint64(m.VmMemorySizeInMb))
} }
if len(m.GPUVHDPath) > 0 {
dAtA[i] = 0x52
i++
i = encodeVarintRunhcs(dAtA, i, uint64(len(m.GPUVHDPath)))
i += copy(dAtA[i:], m.GPUVHDPath)
}
if m.XXX_unrecognized != nil { if m.XXX_unrecognized != nil {
i += copy(dAtA[i:], m.XXX_unrecognized) i += copy(dAtA[i:], m.XXX_unrecognized)
} }
@ -454,6 +464,10 @@ func (m *Options) Size() (n int) {
if m.VmMemorySizeInMb != 0 { if m.VmMemorySizeInMb != 0 {
n += 1 + sovRunhcs(uint64(m.VmMemorySizeInMb)) n += 1 + sovRunhcs(uint64(m.VmMemorySizeInMb))
} }
l = len(m.GPUVHDPath)
if l > 0 {
n += 1 + l + sovRunhcs(uint64(l))
}
if m.XXX_unrecognized != nil { if m.XXX_unrecognized != nil {
n += len(m.XXX_unrecognized) n += len(m.XXX_unrecognized)
} }
@ -527,6 +541,7 @@ func (this *Options) String() string {
`BootFilesRootPath:` + fmt.Sprintf("%v", this.BootFilesRootPath) + `,`, `BootFilesRootPath:` + fmt.Sprintf("%v", this.BootFilesRootPath) + `,`,
`VmProcessorCount:` + fmt.Sprintf("%v", this.VmProcessorCount) + `,`, `VmProcessorCount:` + fmt.Sprintf("%v", this.VmProcessorCount) + `,`,
`VmMemorySizeInMb:` + fmt.Sprintf("%v", this.VmMemorySizeInMb) + `,`, `VmMemorySizeInMb:` + fmt.Sprintf("%v", this.VmMemorySizeInMb) + `,`,
`GPUVHDPath:` + fmt.Sprintf("%v", this.GPUVHDPath) + `,`,
`XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`,
`}`, `}`,
}, "") }, "")
@ -812,6 +827,38 @@ func (m *Options) Unmarshal(dAtA []byte) error {
break break
} }
} }
case 10:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field GPUVHDPath", wireType)
}
var stringLen uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowRunhcs
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
stringLen |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
}
intStringLen := int(stringLen)
if intStringLen < 0 {
return ErrInvalidLengthRunhcs
}
postIndex := iNdEx + intStringLen
if postIndex < 0 {
return ErrInvalidLengthRunhcs
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
m.GPUVHDPath = string(dAtA[iNdEx:postIndex])
iNdEx = postIndex
default: default:
iNdEx = preIndex iNdEx = preIndex
skippy, err := skipRunhcs(dAtA[iNdEx:]) skippy, err := skipRunhcs(dAtA[iNdEx:])

View File

@ -59,6 +59,10 @@ message Options {
// //
// The platform default is 1024MB if omitted. // The platform default is 1024MB if omitted.
int32 vm_memory_size_in_mb = 9; int32 vm_memory_size_in_mb = 9;
// GPUVHDPath is the path to the gpu vhd to add to the uvm
// when a container requests a gpu
string GPUVHDPath = 10;
} }
// ProcessDetails contains additional information about a process. This is the additional // ProcessDetails contains additional information about a process. This is the additional

View File

@ -4,34 +4,32 @@ go 1.13
require ( require (
github.com/Microsoft/go-winio v0.4.15-0.20190919025122-fc70bd9a86b5 github.com/Microsoft/go-winio v0.4.15-0.20190919025122-fc70bd9a86b5
github.com/blang/semver v3.1.0+incompatible // indirect
github.com/containerd/cgroups v0.0.0-20190919134610-bf292b21730f github.com/containerd/cgroups v0.0.0-20190919134610-bf292b21730f
github.com/containerd/console v0.0.0-20180822173158-c12b1e7919c1 github.com/containerd/console v0.0.0-20180822173158-c12b1e7919c1
github.com/containerd/containerd v1.3.0-beta.2.0.20190828155532-0293cbd26c69 github.com/containerd/containerd v1.3.2
github.com/containerd/continuity v0.0.0-20190426062206-aaeac12a7ffc // indirect github.com/containerd/continuity v0.0.0-20190426062206-aaeac12a7ffc // indirect
github.com/containerd/fifo v0.0.0-20190226154929-a9fb20d87448 // indirect github.com/containerd/fifo v0.0.0-20190226154929-a9fb20d87448 // indirect
github.com/containerd/go-runc v0.0.0-20180907222934-5a6d9f37cfa3 github.com/containerd/go-runc v0.0.0-20180907222934-5a6d9f37cfa3
github.com/containerd/ttrpc v0.0.0-20190828154514-0e0f228740de github.com/containerd/ttrpc v0.0.0-20190828154514-0e0f228740de
github.com/containerd/typeurl v0.0.0-20180627222232-a93fcdb778cd github.com/containerd/typeurl v0.0.0-20180627222232-a93fcdb778cd
github.com/gogo/protobuf v1.2.1 github.com/gogo/protobuf v1.3.1
github.com/hashicorp/errwrap v0.0.0-20141028054710-7554cd9344ce // indirect github.com/golang/protobuf v1.3.2 // indirect
github.com/hashicorp/go-multierror v0.0.0-20161216184304-ed905158d874 // indirect github.com/kr/pretty v0.1.0 // indirect
github.com/opencontainers/go-digest v0.0.0-20180430190053-c9281466c8b2 // indirect github.com/opencontainers/go-digest v0.0.0-20180430190053-c9281466c8b2 // indirect
github.com/opencontainers/runc v0.0.0-20190115041553-12f6a991201f // indirect github.com/opencontainers/runc v0.0.0-20190115041553-12f6a991201f // indirect
github.com/opencontainers/runtime-spec v0.1.2-0.20190507144316-5b71a03e2700 github.com/opencontainers/runtime-spec v0.1.2-0.20190507144316-5b71a03e2700
github.com/opencontainers/runtime-tools v0.0.0-20181011054405-1d69bd0f9c39
github.com/pkg/errors v0.8.1 github.com/pkg/errors v0.8.1
github.com/prometheus/procfs v0.0.5 // indirect github.com/prometheus/procfs v0.0.0-20180125133057-cb4147076ac7 // indirect
github.com/sirupsen/logrus v1.4.1 github.com/sirupsen/logrus v1.4.2
github.com/syndtr/gocapability v0.0.0-20170704070218-db04d3cc01c8 // indirect github.com/stretchr/testify v1.4.0 // indirect
github.com/urfave/cli v0.0.0-20171014202726-7bc6a0acffa5 github.com/urfave/cli v0.0.0-20171014202726-7bc6a0acffa5
github.com/xeipuuv/gojsonpointer v0.0.0-20180127040702-4e3ac2762d5f // indirect
github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415 // indirect
github.com/xeipuuv/gojsonschema v0.0.0-20180618132009-1d523034197f // indirect
go.opencensus.io v0.22.0 go.opencensus.io v0.22.0
golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6 golang.org/x/net v0.0.0-20191004110552-13f9640d40b9 // indirect
golang.org/x/sync v0.0.0-20190423024810-112230192c58
golang.org/x/sys v0.0.0-20190916202348-b4ddaad3f8a3 golang.org/x/sys v0.0.0-20190916202348-b4ddaad3f8a3
google.golang.org/grpc v1.20.1 google.golang.org/genproto v0.0.0-20190502173448-54afdca5d873 // indirect
google.golang.org/grpc v1.23.1
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 // indirect
gopkg.in/yaml.v2 v2.2.8 // indirect
gotest.tools v2.2.0+incompatible // indirect gotest.tools v2.2.0+incompatible // indirect
k8s.io/kubernetes v1.13.0
) )

View File

@ -55,6 +55,15 @@ import (
//sys hcnDeleteLoadBalancer(id *_guid, result **uint16) (hr error) = computenetwork.HcnDeleteLoadBalancer? //sys hcnDeleteLoadBalancer(id *_guid, result **uint16) (hr error) = computenetwork.HcnDeleteLoadBalancer?
//sys hcnCloseLoadBalancer(loadBalancer hcnLoadBalancer) (hr error) = computenetwork.HcnCloseLoadBalancer? //sys hcnCloseLoadBalancer(loadBalancer hcnLoadBalancer) (hr error) = computenetwork.HcnCloseLoadBalancer?
// SDN Routes
//sys hcnEnumerateRoutes(query string, routes **uint16, result **uint16) (hr error) = computenetwork.HcnEnumerateSdnRoutes?
//sys hcnCreateRoute(id *_guid, settings string, route *hcnRoute, result **uint16) (hr error) = computenetwork.HcnCreateSdnRoute?
//sys hcnOpenRoute(id *_guid, route *hcnRoute, result **uint16) (hr error) = computenetwork.HcnOpenSdnRoute?
//sys hcnModifyRoute(route hcnRoute, settings string, result **uint16) (hr error) = computenetwork.HcnModifySdnRoute?
//sys hcnQueryRouteProperties(route hcnRoute, query string, properties **uint16, result **uint16) (hr error) = computenetwork.HcnQuerySdnRouteProperties?
//sys hcnDeleteRoute(id *_guid, result **uint16) (hr error) = computenetwork.HcnDeleteSdnRoute?
//sys hcnCloseRoute(route hcnRoute) (hr error) = computenetwork.HcnCloseSdnRoute?
// Service // Service
//sys hcnOpenService(service *hcnService, result **uint16) (hr error) = computenetwork.HcnOpenService? //sys hcnOpenService(service *hcnService, result **uint16) (hr error) = computenetwork.HcnOpenService?
//sys hcnRegisterServiceCallback(service hcnService, callback int32, context int32, callbackHandle *hcnCallbackHandle) (hr error) = computenetwork.HcnRegisterServiceCallback? //sys hcnRegisterServiceCallback(service hcnService, callback int32, context int32, callbackHandle *hcnCallbackHandle) (hr error) = computenetwork.HcnRegisterServiceCallback?
@ -67,6 +76,7 @@ type hcnNetwork syscall.Handle
type hcnEndpoint syscall.Handle type hcnEndpoint syscall.Handle
type hcnNamespace syscall.Handle type hcnNamespace syscall.Handle
type hcnLoadBalancer syscall.Handle type hcnLoadBalancer syscall.Handle
type hcnRoute syscall.Handle
type hcnService syscall.Handle type hcnService syscall.Handle
type hcnCallbackHandle syscall.Handle type hcnCallbackHandle syscall.Handle
@ -188,6 +198,15 @@ func SessionAffinitySupported() error {
return platformDoesNotSupportError("Session Affinity") return platformDoesNotSupportError("Session Affinity")
} }
// IPv6DualStackSupported returns nil when the running HCN version supports
// IPv6 dual stack, and a platform-not-supported error otherwise.
func IPv6DualStackSupported() error {
	// Query the feature set once and branch on the dual-stack flag.
	if GetSupportedFeatures().IPv6DualStack {
		return nil
	}
	return platformDoesNotSupportError("IPv6 DualStack")
}
// RequestType are the different operations performed to settings. // RequestType are the different operations performed to settings.
// Used to update the settings of Endpoint/Namespace objects. // Used to update the settings of Endpoint/Namespace objects.
type RequestType string type RequestType string

View File

@ -17,6 +17,7 @@ var (
errInvalidEndpointID = errors.New("invalid endpoint ID") errInvalidEndpointID = errors.New("invalid endpoint ID")
errInvalidNamespaceID = errors.New("invalid namespace ID") errInvalidNamespaceID = errors.New("invalid namespace ID")
errInvalidLoadBalancerID = errors.New("invalid load balancer ID") errInvalidLoadBalancerID = errors.New("invalid load balancer ID")
errInvalidRouteID = errors.New("invalid route ID")
) )
func checkForErrors(methodName string, hr error, resultBuffer *uint16) error { func checkForErrors(methodName string, hr error, resultBuffer *uint16) error {
@ -133,6 +134,15 @@ func (e LoadBalancerNotFoundError) Error() string {
return fmt.Sprintf("LoadBalancer %q not found", e.LoadBalancerId) return fmt.Sprintf("LoadBalancer %q not found", e.LoadBalancerId)
} }
// RouteNotFoundError results from a failed search for a route by Id.
type RouteNotFoundError struct {
// RouteId is the identifier of the SDN route that could not be found.
RouteId string
}

// Error implements the error interface, quoting the missing route's Id.
func (e RouteNotFoundError) Error() string {
return fmt.Sprintf("SDN Route %q not found", e.RouteId)
}
// IsNotFoundError returns a boolean indicating whether the error was caused by // IsNotFoundError returns a boolean indicating whether the error was caused by
// a resource not being found. // a resource not being found.
func IsNotFoundError(err error) bool { func IsNotFoundError(err error) bool {
@ -145,6 +155,8 @@ func IsNotFoundError(err error) bool {
return true return true
case LoadBalancerNotFoundError: case LoadBalancerNotFoundError:
return true return true
case RouteNotFoundError:
return true
case *hcserror.HcsError: case *hcserror.HcsError:
return pe.Err == hcs.ErrElementNotFound return pe.Err == hcs.ErrElementNotFound
} }

View File

@ -49,8 +49,13 @@ var (
VersionRange{MinVersion: Version{Major: 9, Minor: 3}, MaxVersion: Version{Major: 9, Minor: math.MaxInt32}}, VersionRange{MinVersion: Version{Major: 9, Minor: 3}, MaxVersion: Version{Major: 9, Minor: math.MaxInt32}},
VersionRange{MinVersion: Version{Major: 10, Minor: 4}, MaxVersion: Version{Major: math.MaxInt32, Minor: math.MaxInt32}}, VersionRange{MinVersion: Version{Major: 10, Minor: 4}, MaxVersion: Version{Major: math.MaxInt32, Minor: math.MaxInt32}},
} }
// HNS 11.10 allows for session affinity for loadbalancing // HNS 12.0 allows for session affinity for loadbalancing
SessionAffinityVersion = VersionRanges{VersionRange{MinVersion: Version{Major: 11, Minor: 10}, MaxVersion: Version{Major: math.MaxInt32, Minor: math.MaxInt32}}} SessionAffinityVersion = VersionRanges{VersionRange{MinVersion: Version{Major: 12, Minor: 0}, MaxVersion: Version{Major: math.MaxInt32, Minor: math.MaxInt32}}}
// HNS 10.5 through 11 (not included) and 12.0+ supports Ipv6 dual stack.
IPv6DualStackVersion = VersionRanges{
VersionRange{MinVersion: Version{Major: 10, Minor: 5}, MaxVersion: Version{Major: 10, Minor: math.MaxInt32}},
VersionRange{MinVersion: Version{Major: 12, Minor: 0}, MaxVersion: Version{Major: math.MaxInt32, Minor: math.MaxInt32}},
}
) )
// GetGlobals returns the global properties of the HCN Service. // GetGlobals returns the global properties of the HCN Service.

266
vendor/github.com/Microsoft/hcsshim/hcn/hcnroute.go generated vendored Normal file
View File

@ -0,0 +1,266 @@
package hcn
import (
"encoding/json"
"errors"
"github.com/Microsoft/go-winio/pkg/guid"
"github.com/Microsoft/hcsshim/internal/interop"
"github.com/sirupsen/logrus"
)
// HostComputeRoute represents SDN routes.
type HostComputeRoute struct {
	ID                   string                  `json:"ID,omitempty"` // route identifier; parsed as a GUID string by deleteRoute
	HostComputeEndpoints []string                `json:",omitempty"`   // Ids of the endpoints this route is applied to
	Setting              []SDNRoutePolicySetting `json:",omitempty"`   // destination prefix / next hop / encapsulation settings
	SchemaVersion        SchemaVersion           `json:",omitempty"`   // HCN schema version used when creating the route
}
// ListRoutes makes a call to list all available routes.
func ListRoutes() ([]HostComputeRoute, error) {
	// Listing everything is just a query with the default (unfiltered) query.
	return ListRoutesQuery(defaultQuery())
}
// ListRoutesQuery makes a call to query the list of available routes.
func ListRoutesQuery(query HostComputeQuery) ([]HostComputeRoute, error) {
	// Serialize the query and hand it straight to the HCN enumeration call.
	encoded, err := json.Marshal(query)
	if err != nil {
		return nil, err
	}
	return enumerateRoutes(string(encoded))
}
// GetRouteByID returns the route specified by Id.
func GetRouteByID(routeID string) (*HostComputeRoute, error) {
	// Build a query filtered down to the single requested Id.
	filter, err := json.Marshal(map[string]string{"ID": routeID})
	if err != nil {
		return nil, err
	}
	query := defaultQuery()
	query.Filter = string(filter)

	routes, err := ListRoutesQuery(query)
	if err != nil {
		return nil, err
	}
	if len(routes) == 0 {
		return nil, RouteNotFoundError{RouteId: routeID}
	}
	return &routes[0], nil
}
// Create Route.
func (route *HostComputeRoute) Create() (*HostComputeRoute, error) {
	logrus.Debugf("hcn::HostComputeRoute::Create id=%s", route.ID)

	payload, err := json.Marshal(route)
	if err != nil {
		return nil, err
	}
	logrus.Debugf("hcn::HostComputeRoute::Create JSON: %s", payload)

	// createRoute returns the route as HCN materialized it, which may carry
	// service-populated fields in addition to what was submitted.
	created, hcnErr := createRoute(string(payload))
	if hcnErr != nil {
		return nil, hcnErr
	}
	return created, nil
}
// Delete Route.
//
// The route is looked up first so that deleting an already-removed route is a
// no-op. Fixed: a lookup failure other than "not found" was previously
// discarded (`existingRoute, _ :=`), silently reporting success; it is now
// returned to the caller.
func (route *HostComputeRoute) Delete() error {
	logrus.Debugf("hcn::HostComputeRoute::Delete id=%s", route.ID)

	existingRoute, err := GetRouteByID(route.ID)
	if err != nil {
		if IsNotFoundError(err) {
			// Already gone; nothing to delete.
			return nil
		}
		return err
	}
	if existingRoute != nil {
		if err := deleteRoute(route.ID); err != nil {
			return err
		}
	}
	return nil
}
// AddEndpoint add an endpoint to a route
// Since HCNRoute doesn't implement modify functionality, add operation is essentially delete and add
func (route *HostComputeRoute) AddEndpoint(endpoint *HostComputeEndpoint) (*HostComputeRoute, error) {
	logrus.Debugf("hcn::HostComputeRoute::AddEndpoint route=%s endpoint=%s", route.ID, endpoint.Id)

	if err := route.Delete(); err != nil {
		return nil, err
	}

	// Re-create the route with the new endpoint appended to the existing list.
	route.HostComputeEndpoints = append(route.HostComputeEndpoints, endpoint.Id)
	return route.Create()
}
// RemoveEndpoint removes an endpoint from a route
// Since HCNRoute doesn't implement modify functionality, remove operation is essentially delete and add
//
// Fixed: the original left the search index at 0 when the endpoint was not in
// HostComputeEndpoints, so a remove of a non-member endpoint incorrectly
// dropped the first entry. The list is now only spliced when a match is found.
func (route *HostComputeRoute) RemoveEndpoint(endpoint *HostComputeEndpoint) (*HostComputeRoute, error) {
	logrus.Debugf("hcn::HostComputeRoute::RemoveEndpoint route=%s endpoint=%s", route.ID, endpoint.Id)

	err := route.Delete()
	if err != nil {
		return nil, err
	}

	// Create a list of all the endpoints besides the one being removed.
	for index, endpointReference := range route.HostComputeEndpoints {
		if endpointReference == endpoint.Id {
			route.HostComputeEndpoints = append(route.HostComputeEndpoints[:index], route.HostComputeEndpoints[index+1:]...)
			break
		}
	}
	return route.Create()
}
// AddRoute for the specified endpoints and SDN Route setting
func AddRoute(endpoints []HostComputeEndpoint, destinationPrefix string, nextHop string, needEncapsulation bool) (*HostComputeRoute, error) {
	logrus.Debugf("hcn::HostComputeRoute::AddRoute endpointId=%v, destinationPrefix=%v, nextHop=%v, needEncapsulation=%v", endpoints, destinationPrefix, nextHop, needEncapsulation)

	if len(endpoints) <= 0 {
		return nil, errors.New("Missing endpoints")
	}

	// Collect the endpoint Ids the route should be applied to.
	endpointIds := make([]string, 0, len(endpoints))
	for _, endpoint := range endpoints {
		endpointIds = append(endpointIds, endpoint.Id)
	}

	route := &HostComputeRoute{
		SchemaVersion:        V2SchemaVersion(),
		HostComputeEndpoints: endpointIds,
		Setting: []SDNRoutePolicySetting{
			{
				DestinationPrefix: destinationPrefix,
				NextHop:           nextHop,
				NeedEncap:         needEncapsulation,
			},
		},
	}
	return route.Create()
}
// enumerateRoutes asks HCN for the GUIDs of all routes matching the query,
// then resolves each GUID into a fully populated HostComputeRoute.
func enumerateRoutes(query string) ([]HostComputeRoute, error) {
	var (
		resultBuffer *uint16
		routeBuffer  *uint16
	)
	if err := checkForErrors("hcnEnumerateRoutes", hcnEnumerateRoutes(query, &routeBuffer, &resultBuffer), resultBuffer); err != nil {
		return nil, err
	}

	// The call returns a JSON array of route GUIDs in a CoTaskMem string.
	var routeIds []guid.GUID
	if err := json.Unmarshal([]byte(interop.ConvertAndFreeCoTaskMemString(routeBuffer)), &routeIds); err != nil {
		return nil, err
	}

	var outputRoutes []HostComputeRoute
	for _, routeGUID := range routeIds {
		route, err := getRoute(routeGUID, query)
		if err != nil {
			return nil, err
		}
		outputRoutes = append(outputRoutes, *route)
	}
	return outputRoutes, nil
}
// getRoute opens the route with the given GUID, queries its properties, and
// unmarshals the result into a HostComputeRoute.
//
// Fixed: the open route handle was leaked when hcnQueryRouteProperties
// failed; the handle is now closed (best-effort) on that error path.
func getRoute(routeGUID guid.GUID, query string) (*HostComputeRoute, error) {
	// Open routes.
	var (
		routeHandle      hcnRoute
		resultBuffer     *uint16
		propertiesBuffer *uint16
	)
	hr := hcnOpenRoute(&routeGUID, &routeHandle, &resultBuffer)
	if err := checkForErrors("hcnOpenRoute", hr, resultBuffer); err != nil {
		return nil, err
	}

	// Query routes.
	hr = hcnQueryRouteProperties(routeHandle, query, &propertiesBuffer, &resultBuffer)
	if err := checkForErrors("hcnQueryRouteProperties", hr, resultBuffer); err != nil {
		// Best-effort close; the query error is the one the caller cares about.
		_ = checkForErrors("hcnCloseRoute", hcnCloseRoute(routeHandle), nil)
		return nil, err
	}
	properties := interop.ConvertAndFreeCoTaskMemString(propertiesBuffer)

	// Close routes.
	hr = hcnCloseRoute(routeHandle)
	if err := checkForErrors("hcnCloseRoute", hr, nil); err != nil {
		return nil, err
	}

	// Convert output to HostComputeRoute
	var outputRoute HostComputeRoute
	if err := json.Unmarshal([]byte(properties), &outputRoute); err != nil {
		return nil, err
	}
	return &outputRoute, nil
}
// createRoute submits the JSON settings to HCN to create a new route, then
// queries the created route's properties and returns them as a
// HostComputeRoute.
//
// Fixed: the route handle returned by hcnCreateRoute was leaked when the
// subsequent query marshalling or hcnQueryRouteProperties failed; the handle
// is now closed (best-effort) on those error paths.
func createRoute(settings string) (*HostComputeRoute, error) {
	// Create new route.
	var (
		routeHandle      hcnRoute
		resultBuffer     *uint16
		propertiesBuffer *uint16
	)
	routeGUID := guid.GUID{}
	hr := hcnCreateRoute(&routeGUID, settings, &routeHandle, &resultBuffer)
	if err := checkForErrors("hcnCreateRoute", hr, resultBuffer); err != nil {
		return nil, err
	}
	// Best-effort close used on error paths below; the primary error wins.
	closeRoute := func() {
		_ = checkForErrors("hcnCloseRoute", hcnCloseRoute(routeHandle), nil)
	}

	// Query route.
	hcnQuery := defaultQuery()
	query, err := json.Marshal(hcnQuery)
	if err != nil {
		closeRoute()
		return nil, err
	}
	hr = hcnQueryRouteProperties(routeHandle, string(query), &propertiesBuffer, &resultBuffer)
	if err := checkForErrors("hcnQueryRouteProperties", hr, resultBuffer); err != nil {
		closeRoute()
		return nil, err
	}
	properties := interop.ConvertAndFreeCoTaskMemString(propertiesBuffer)

	// Close Route.
	hr = hcnCloseRoute(routeHandle)
	if err := checkForErrors("hcnCloseRoute", hr, nil); err != nil {
		return nil, err
	}

	// Convert output to HostComputeRoute
	var outputRoute HostComputeRoute
	if err := json.Unmarshal([]byte(properties), &outputRoute); err != nil {
		return nil, err
	}
	return &outputRoute, nil
}
// deleteRoute asks HCN to delete the route identified by routeID, which must
// be a valid GUID string.
func deleteRoute(routeID string) error {
	routeGUID, err := guid.FromString(routeID)
	if err != nil {
		return errInvalidRouteID
	}

	var resultBuffer *uint16
	if err := checkForErrors("hcnDeleteRoute", hcnDeleteRoute(&routeGUID, &resultBuffer), resultBuffer); err != nil {
		return err
	}
	return nil
}

View File

@ -14,6 +14,7 @@ type SupportedFeatures struct {
Slash32EndpointPrefixes bool `json:"Slash32EndpointPrefixes"` Slash32EndpointPrefixes bool `json:"Slash32EndpointPrefixes"`
AclSupportForProtocol252 bool `json:"AclSupportForProtocol252"` AclSupportForProtocol252 bool `json:"AclSupportForProtocol252"`
SessionAffinity bool `json:"SessionAffinity"` SessionAffinity bool `json:"SessionAffinity"`
IPv6DualStack bool `json:"IPv6DualStack"`
} }
// AclFeatures are the supported ACL possibilities. // AclFeatures are the supported ACL possibilities.
@ -59,6 +60,7 @@ func GetSupportedFeatures() SupportedFeatures {
features.Slash32EndpointPrefixes = isFeatureSupported(globals.Version, Slash32EndpointPrefixesVersion) features.Slash32EndpointPrefixes = isFeatureSupported(globals.Version, Slash32EndpointPrefixesVersion)
features.AclSupportForProtocol252 = isFeatureSupported(globals.Version, AclSupportForProtocol252Version) features.AclSupportForProtocol252 = isFeatureSupported(globals.Version, AclSupportForProtocol252Version)
features.SessionAffinity = isFeatureSupported(globals.Version, SessionAffinityVersion) features.SessionAffinity = isFeatureSupported(globals.Version, SessionAffinityVersion)
features.IPv6DualStack = isFeatureSupported(globals.Version, IPv6DualStackVersion)
return features return features
} }

View File

@ -71,6 +71,13 @@ var (
procHcnQueryLoadBalancerProperties = modcomputenetwork.NewProc("HcnQueryLoadBalancerProperties") procHcnQueryLoadBalancerProperties = modcomputenetwork.NewProc("HcnQueryLoadBalancerProperties")
procHcnDeleteLoadBalancer = modcomputenetwork.NewProc("HcnDeleteLoadBalancer") procHcnDeleteLoadBalancer = modcomputenetwork.NewProc("HcnDeleteLoadBalancer")
procHcnCloseLoadBalancer = modcomputenetwork.NewProc("HcnCloseLoadBalancer") procHcnCloseLoadBalancer = modcomputenetwork.NewProc("HcnCloseLoadBalancer")
procHcnEnumerateSdnRoutes = modcomputenetwork.NewProc("HcnEnumerateSdnRoutes")
procHcnCreateSdnRoute = modcomputenetwork.NewProc("HcnCreateSdnRoute")
procHcnOpenSdnRoute = modcomputenetwork.NewProc("HcnOpenSdnRoute")
procHcnModifySdnRoute = modcomputenetwork.NewProc("HcnModifySdnRoute")
procHcnQuerySdnRouteProperties = modcomputenetwork.NewProc("HcnQuerySdnRouteProperties")
procHcnDeleteSdnRoute = modcomputenetwork.NewProc("HcnDeleteSdnRoute")
procHcnCloseSdnRoute = modcomputenetwork.NewProc("HcnCloseSdnRoute")
procHcnOpenService = modcomputenetwork.NewProc("HcnOpenService") procHcnOpenService = modcomputenetwork.NewProc("HcnOpenService")
procHcnRegisterServiceCallback = modcomputenetwork.NewProc("HcnRegisterServiceCallback") procHcnRegisterServiceCallback = modcomputenetwork.NewProc("HcnRegisterServiceCallback")
procHcnUnregisterServiceCallback = modcomputenetwork.NewProc("HcnUnregisterServiceCallback") procHcnUnregisterServiceCallback = modcomputenetwork.NewProc("HcnUnregisterServiceCallback")
@ -657,6 +664,140 @@ func hcnCloseLoadBalancer(loadBalancer hcnLoadBalancer) (hr error) {
return return
} }
// hcnEnumerateRoutes converts the query to UTF-16 and calls the
// HcnEnumerateSdnRoutes export. On success, *routes and *result point at
// CoTaskMem strings (JSON) that the caller is responsible for converting/freeing.
func hcnEnumerateRoutes(query string, routes **uint16, result **uint16) (hr error) {
	var _p0 *uint16
	_p0, hr = syscall.UTF16PtrFromString(query)
	if hr != nil {
		return
	}
	return _hcnEnumerateRoutes(_p0, routes, result)
}

// _hcnEnumerateRoutes performs the raw syscall once the string argument has
// been converted to UTF-16.
func _hcnEnumerateRoutes(query *uint16, routes **uint16, result **uint16) (hr error) {
	// Resolve the export lazily so a missing DLL/export surfaces as an error
	// here instead of crashing at load time.
	if hr = procHcnEnumerateSdnRoutes.Find(); hr != nil {
		return
	}
	r0, _, _ := syscall.Syscall(procHcnEnumerateSdnRoutes.Addr(), 3, uintptr(unsafe.Pointer(query)), uintptr(unsafe.Pointer(routes)), uintptr(unsafe.Pointer(result)))
	if int32(r0) < 0 {
		// Unwrap Win32-facility HRESULTs back to the plain Win32 error code.
		if r0&0x1fff0000 == 0x00070000 {
			r0 &= 0xffff
		}
		hr = syscall.Errno(r0)
	}
	return
}
// hcnCreateRoute converts settings to UTF-16 and calls HcnCreateSdnRoute,
// returning the new route handle in *route and any error record in *result.
func hcnCreateRoute(id *_guid, settings string, route *hcnRoute, result **uint16) (hr error) {
	var _p0 *uint16
	_p0, hr = syscall.UTF16PtrFromString(settings)
	if hr != nil {
		return
	}
	return _hcnCreateRoute(id, _p0, route, result)
}

// _hcnCreateRoute performs the raw syscall once the settings string has been
// converted to UTF-16.
func _hcnCreateRoute(id *_guid, settings *uint16, route *hcnRoute, result **uint16) (hr error) {
	if hr = procHcnCreateSdnRoute.Find(); hr != nil {
		return
	}
	r0, _, _ := syscall.Syscall6(procHcnCreateSdnRoute.Addr(), 4, uintptr(unsafe.Pointer(id)), uintptr(unsafe.Pointer(settings)), uintptr(unsafe.Pointer(route)), uintptr(unsafe.Pointer(result)), 0, 0)
	if int32(r0) < 0 {
		// Unwrap Win32-facility HRESULTs back to the plain Win32 error code.
		if r0&0x1fff0000 == 0x00070000 {
			r0 &= 0xffff
		}
		hr = syscall.Errno(r0)
	}
	return
}
// hcnOpenRoute calls HcnOpenSdnRoute to open an existing route by GUID,
// returning its handle in *route and any error record in *result.
func hcnOpenRoute(id *_guid, route *hcnRoute, result **uint16) (hr error) {
	if hr = procHcnOpenSdnRoute.Find(); hr != nil {
		return
	}
	r0, _, _ := syscall.Syscall(procHcnOpenSdnRoute.Addr(), 3, uintptr(unsafe.Pointer(id)), uintptr(unsafe.Pointer(route)), uintptr(unsafe.Pointer(result)))
	if int32(r0) < 0 {
		// Unwrap Win32-facility HRESULTs back to the plain Win32 error code.
		if r0&0x1fff0000 == 0x00070000 {
			r0 &= 0xffff
		}
		hr = syscall.Errno(r0)
	}
	return
}
// hcnModifyRoute converts settings to UTF-16 and calls HcnModifySdnRoute to
// modify an open route handle. (Currently unused by the higher-level API,
// which re-creates routes instead of modifying them.)
func hcnModifyRoute(route hcnRoute, settings string, result **uint16) (hr error) {
	var _p0 *uint16
	_p0, hr = syscall.UTF16PtrFromString(settings)
	if hr != nil {
		return
	}
	return _hcnModifyRoute(route, _p0, result)
}

// _hcnModifyRoute performs the raw syscall once the settings string has been
// converted to UTF-16.
func _hcnModifyRoute(route hcnRoute, settings *uint16, result **uint16) (hr error) {
	if hr = procHcnModifySdnRoute.Find(); hr != nil {
		return
	}
	r0, _, _ := syscall.Syscall(procHcnModifySdnRoute.Addr(), 3, uintptr(route), uintptr(unsafe.Pointer(settings)), uintptr(unsafe.Pointer(result)))
	if int32(r0) < 0 {
		// Unwrap Win32-facility HRESULTs back to the plain Win32 error code.
		if r0&0x1fff0000 == 0x00070000 {
			r0 &= 0xffff
		}
		hr = syscall.Errno(r0)
	}
	return
}
// hcnQueryRouteProperties converts the query to UTF-16 and calls
// HcnQuerySdnRouteProperties, returning the route's JSON properties in
// *properties and any error record in *result.
func hcnQueryRouteProperties(route hcnRoute, query string, properties **uint16, result **uint16) (hr error) {
	var _p0 *uint16
	_p0, hr = syscall.UTF16PtrFromString(query)
	if hr != nil {
		return
	}
	return _hcnQueryRouteProperties(route, _p0, properties, result)
}

// _hcnQueryRouteProperties performs the raw syscall once the query string has
// been converted to UTF-16.
func _hcnQueryRouteProperties(route hcnRoute, query *uint16, properties **uint16, result **uint16) (hr error) {
	if hr = procHcnQuerySdnRouteProperties.Find(); hr != nil {
		return
	}
	r0, _, _ := syscall.Syscall6(procHcnQuerySdnRouteProperties.Addr(), 4, uintptr(route), uintptr(unsafe.Pointer(query)), uintptr(unsafe.Pointer(properties)), uintptr(unsafe.Pointer(result)), 0, 0)
	if int32(r0) < 0 {
		// Unwrap Win32-facility HRESULTs back to the plain Win32 error code.
		if r0&0x1fff0000 == 0x00070000 {
			r0 &= 0xffff
		}
		hr = syscall.Errno(r0)
	}
	return
}
// hcnDeleteRoute calls HcnDeleteSdnRoute to delete the route identified by
// the given GUID; any error record is returned in *result.
func hcnDeleteRoute(id *_guid, result **uint16) (hr error) {
	if hr = procHcnDeleteSdnRoute.Find(); hr != nil {
		return
	}
	r0, _, _ := syscall.Syscall(procHcnDeleteSdnRoute.Addr(), 2, uintptr(unsafe.Pointer(id)), uintptr(unsafe.Pointer(result)), 0)
	if int32(r0) < 0 {
		// Unwrap Win32-facility HRESULTs back to the plain Win32 error code.
		if r0&0x1fff0000 == 0x00070000 {
			r0 &= 0xffff
		}
		hr = syscall.Errno(r0)
	}
	return
}
// hcnCloseRoute calls HcnCloseSdnRoute to release an open route handle.
func hcnCloseRoute(route hcnRoute) (hr error) {
	if hr = procHcnCloseSdnRoute.Find(); hr != nil {
		return
	}
	r0, _, _ := syscall.Syscall(procHcnCloseSdnRoute.Addr(), 1, uintptr(route), 0, 0)
	if int32(r0) < 0 {
		// Unwrap Win32-facility HRESULTs back to the plain Win32 error code.
		if r0&0x1fff0000 == 0x00070000 {
			r0 &= 0xffff
		}
		hr = syscall.Errno(r0)
	}
	return
}
func hcnOpenService(service *hcnService, result **uint16) (hr error) { func hcnOpenService(service *hcnService, result **uint16) (hr error) {
if hr = procHcnOpenService.Find(); hr != nil { if hr = procHcnOpenService.Find(); hr != nil {
return return

View File

@ -21,8 +21,11 @@ const (
OutboundNat = hns.OutboundNat OutboundNat = hns.OutboundNat
ExternalLoadBalancer = hns.ExternalLoadBalancer ExternalLoadBalancer = hns.ExternalLoadBalancer
Route = hns.Route Route = hns.Route
Proxy = hns.Proxy
) )
type ProxyPolicy = hns.ProxyPolicy
type NatPolicy = hns.NatPolicy type NatPolicy = hns.NatPolicy
type QosPolicy = hns.QosPolicy type QosPolicy = hns.QosPolicy

View File

@ -0,0 +1,5 @@
package hcs
//go:generate go run ../../mksyscall_windows.go -output zsyscall_windows.go syscall.go
//sys hcsFormatWritableLayerVhd(handle uintptr) (hr error) = computestorage.HcsFormatWritableLayerVhd

View File

@ -1,10 +1,14 @@
package hcs package hcs
import ( import (
"context"
"io" "io"
"syscall" "syscall"
"github.com/Microsoft/go-winio" "github.com/Microsoft/go-winio"
diskutil "github.com/Microsoft/go-winio/vhd"
"github.com/pkg/errors"
"golang.org/x/sys/windows"
) )
// makeOpenFiles calls winio.MakeOpenFile for each handle in a slice but closes all the handles // makeOpenFiles calls winio.MakeOpenFile for each handle in a slice but closes all the handles
@ -31,3 +35,27 @@ func makeOpenFiles(hs []syscall.Handle) (_ []io.ReadWriteCloser, err error) {
} }
return fs, nil return fs, nil
} }
// CreateNTFSVHD creates a VHD formatted with NTFS of size `sizeGB` at the
// given `vhdPath`.
//
// NOTE(review): ctx is accepted but currently unused by this implementation.
func CreateNTFSVHD(ctx context.Context, vhdPath string, sizeGB uint32) (err error) {
	if err := diskutil.CreateVhdx(vhdPath, sizeGB, 1); err != nil {
		return errors.Wrap(err, "failed to create VHD")
	}

	vhd, err := diskutil.OpenVirtualDisk(vhdPath, diskutil.VirtualDiskAccessNone, diskutil.OpenVirtualDiskFlagNone)
	if err != nil {
		return errors.Wrap(err, "failed to open VHD")
	}
	defer func() {
		// Always close the disk handle; only surface a close failure when the
		// rest of the function succeeded (errors.Wrap(nil, ...) yields nil).
		err2 := windows.CloseHandle(windows.Handle(vhd))
		if err == nil {
			err = errors.Wrap(err2, "failed to close VHD")
		}
	}()

	if err := hcsFormatWritableLayerVhd(uintptr(vhd)); err != nil {
		return errors.Wrap(err, "failed to format VHD")
	}

	return nil
}

View File

@ -0,0 +1,54 @@
// Code generated mksyscall_windows.exe DO NOT EDIT
package hcs
import (
"syscall"
"unsafe"
"golang.org/x/sys/windows"
)
var _ unsafe.Pointer
// Do the interface allocations only once for common
// Errno values.
const (
errnoERROR_IO_PENDING = 997
)
var (
errERROR_IO_PENDING error = syscall.Errno(errnoERROR_IO_PENDING)
)
// errnoErr returns common boxed Errno values, to prevent
// allocations at runtime.
func errnoErr(e syscall.Errno) error {
switch e {
case 0:
return nil
case errnoERROR_IO_PENDING:
return errERROR_IO_PENDING
}
// TODO: add more here, after collecting data on the common
// error values see on Windows. (perhaps when running
// all.bat?)
return e
}
var (
modcomputestorage = windows.NewLazySystemDLL("computestorage.dll")
procHcsFormatWritableLayerVhd = modcomputestorage.NewProc("HcsFormatWritableLayerVhd")
)
// hcsFormatWritableLayerVhd calls the HcsFormatWritableLayerVhd export in
// computestorage.dll to format the open VHD handle for use as a writable layer.
func hcsFormatWritableLayerVhd(handle uintptr) (hr error) {
	r0, _, _ := syscall.Syscall(procHcsFormatWritableLayerVhd.Addr(), 1, uintptr(handle), 0, 0)
	if int32(r0) < 0 {
		// Unwrap Win32-facility HRESULTs back to the plain Win32 error code.
		if r0&0x1fff0000 == 0x00070000 {
			r0 &= 0xffff
		}
		hr = syscall.Errno(r0)
	}
	return
}

View File

@ -173,6 +173,27 @@ func (endpoint *HNSEndpoint) ApplyACLPolicy(policies ...*ACLPolicy) error {
return err return err
} }
// ApplyProxyPolicy applies a set of Proxy Policies on the Endpoint
func (endpoint *HNSEndpoint) ApplyProxyPolicy(policies ...*ProxyPolicy) error {
	operation := "ApplyProxyPolicy"
	title := "hcsshim::HNSEndpoint::" + operation
	logrus.Debugf(title+" id=%s", endpoint.Id)

	for _, policy := range policies {
		// Nil entries are tolerated and simply skipped.
		if policy == nil {
			continue
		}
		encoded, err := json.Marshal(policy)
		if err != nil {
			return err
		}
		endpoint.Policies = append(endpoint.Policies, encoded)
	}

	// Push the accumulated policy list to HNS in a single update.
	_, err := endpoint.Update()
	return err
}
// ContainerAttach attaches an endpoint to container // ContainerAttach attaches an endpoint to container
func (endpoint *HNSEndpoint) ContainerAttach(containerID string, compartmentID uint16) error { func (endpoint *HNSEndpoint) ContainerAttach(containerID string, compartmentID uint16) error {
operation := "ContainerAttach" operation := "ContainerAttach"

View File

@ -17,6 +17,7 @@ const (
OutboundNat PolicyType = "OutBoundNAT" OutboundNat PolicyType = "OutBoundNAT"
ExternalLoadBalancer PolicyType = "ELB" ExternalLoadBalancer PolicyType = "ELB"
Route PolicyType = "ROUTE" Route PolicyType = "ROUTE"
Proxy PolicyType = "PROXY"
) )
type NatPolicy struct { type NatPolicy struct {
@ -60,6 +61,15 @@ type OutboundNatPolicy struct {
Destinations []string `json:",omitempty"` Destinations []string `json:",omitempty"`
} }
// ProxyPolicy carries the settings for a PROXY-type endpoint policy.
// NOTE(review): field semantics below are inferred from names — confirm
// against the HNS proxy policy documentation.
type ProxyPolicy struct {
	Type          PolicyType `json:"Type"`
	IP            string     `json:",omitempty"` // presumably the proxy's IP address
	Port          string     `json:",omitempty"` // presumably the proxy's port, as a string
	ExceptionList []string   `json:",omitempty"` // presumably addresses exempt from proxying
	Destination   string     `json:",omitempty"`
	OutboundNat   bool       `json:",omitempty"`
}
type ActionType string type ActionType string
type DirectionType string type DirectionType string
type RuleType string type RuleType string

View File

@ -39,4 +39,8 @@ type Devices struct {
FlexibleIov map[string]FlexibleIoDevice `json:"FlexibleIov,omitempty"` FlexibleIov map[string]FlexibleIoDevice `json:"FlexibleIov,omitempty"`
SharedMemory *SharedMemoryConfiguration `json:"SharedMemory,omitempty"` SharedMemory *SharedMemoryConfiguration `json:"SharedMemory,omitempty"`
// TODO: This is pre-release support in schema 2.3. Need to add build number
// docs when a public build with this is out.
VirtualPci map[string]VirtualPciDevice `json:",omitempty"`
} }

View File

@ -27,4 +27,23 @@ type Memory2 struct {
// to the VM, allowing it to trim non-zeroed pages from the working set (if supported by // to the VM, allowing it to trim non-zeroed pages from the working set (if supported by
// the guest operating system). // the guest operating system).
EnableColdDiscardHint bool `json:"EnableColdDiscardHint,omitempty"` EnableColdDiscardHint bool `json:"EnableColdDiscardHint,omitempty"`
// LowMmioGapInMB is the low MMIO region allocated below 4GB.
//
// TODO: This is pre-release support in schema 2.3. Need to add build number
// docs when a public build with this is out.
LowMMIOGapInMB uint64 `json:"LowMmioGapInMB,omitempty"`
// HighMmioBaseInMB is the high MMIO region allocated above 4GB (base and
// size).
//
// TODO: This is pre-release support in schema 2.3. Need to add build number
// docs when a public build with this is out.
HighMMIOBaseInMB uint64 `json:"HighMmioBaseInMB,omitempty"`
// HighMmioGapInMB is the high MMIO region.
//
// TODO: This is pre-release support in schema 2.3. Need to add build number
// docs when a public build with this is out.
HighMMIOGapInMB uint64 `json:"HighMmioGapInMB,omitempty"`
} }

View File

@ -0,0 +1,16 @@
/*
* HCS API
*
* No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
*
* API version: 2.3
* Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git)
*/
package hcsschema
// TODO: This is pre-release support in schema 2.3. Need to add build number
// docs when a public build with this is out.

// VirtualPciDevice is the schema object grouping the virtual PCI functions
// that make up one assigned device.
type VirtualPciDevice struct {
	Functions []VirtualPciFunction `json:",omitempty"`
}

View File

@ -0,0 +1,18 @@
/*
* HCS API
*
* No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
*
* API version: 2.3
* Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git)
*/
package hcsschema
// TODO: This is pre-release support in schema 2.3. Need to add build number
// docs when a public build with this is out.

// VirtualPciFunction identifies a single function of a virtual PCI device by
// its host device instance path and function number.
type VirtualPciFunction struct {
	DeviceInstancePath string `json:",omitempty"` // host device instance path of the backing device

	VirtualFunction uint16 `json:",omitempty"` // SR-IOV virtual function number — TODO confirm
}

View File

@ -1,28 +1,23 @@
package wclayer package wclayer
import ( import (
"context"
"github.com/Microsoft/hcsshim/internal/hcserror" "github.com/Microsoft/hcsshim/internal/hcserror"
"github.com/sirupsen/logrus" "github.com/Microsoft/hcsshim/internal/oc"
"go.opencensus.io/trace"
) )
// ActivateLayer will find the layer with the given id and mount it's filesystem. // ActivateLayer will find the layer with the given id and mount it's filesystem.
// For a read/write layer, the mounted filesystem will appear as a volume on the // For a read/write layer, the mounted filesystem will appear as a volume on the
// host, while a read-only layer is generally expected to be a no-op. // host, while a read-only layer is generally expected to be a no-op.
// An activated layer must later be deactivated via DeactivateLayer. // An activated layer must later be deactivated via DeactivateLayer.
func ActivateLayer(path string) (err error) { func ActivateLayer(ctx context.Context, path string) (err error) {
title := "hcsshim::ActivateLayer" title := "hcsshim::ActivateLayer"
fields := logrus.Fields{ ctx, span := trace.StartSpan(ctx, title)
"path": path, defer span.End()
} defer func() { oc.SetSpanStatus(span, err) }()
logrus.WithFields(fields).Debug(title) span.AddAttributes(trace.StringAttribute("path", path))
defer func() {
if err != nil {
fields[logrus.ErrorKey] = err
logrus.WithFields(fields).Error(err)
} else {
logrus.WithFields(fields).Debug(title + " - succeeded")
}
}()
err = activateLayer(&stdDriverInfo, path) err = activateLayer(&stdDriverInfo, path)
if err != nil { if err != nil {

View File

@ -1,6 +1,7 @@
package wclayer package wclayer
import ( import (
"context"
"errors" "errors"
"os" "os"
"path/filepath" "path/filepath"
@ -8,10 +9,15 @@ import (
"github.com/Microsoft/go-winio" "github.com/Microsoft/go-winio"
"github.com/Microsoft/hcsshim/internal/hcserror" "github.com/Microsoft/hcsshim/internal/hcserror"
"github.com/Microsoft/hcsshim/internal/oc"
"github.com/Microsoft/hcsshim/internal/safefile" "github.com/Microsoft/hcsshim/internal/safefile"
"go.opencensus.io/trace"
) )
type baseLayerWriter struct { type baseLayerWriter struct {
ctx context.Context
s *trace.Span
root *os.File root *os.File
f *os.File f *os.File
bw *winio.BackupFileWriter bw *winio.BackupFileWriter
@ -136,12 +142,15 @@ func (w *baseLayerWriter) Write(b []byte) (int, error) {
return n, err return n, err
} }
func (w *baseLayerWriter) Close() error { func (w *baseLayerWriter) Close() (err error) {
defer w.s.End()
defer func() { oc.SetSpanStatus(w.s, err) }()
defer func() { defer func() {
w.root.Close() w.root.Close()
w.root = nil w.root = nil
}() }()
err := w.closeCurrentFile()
err = w.closeCurrentFile()
if err != nil { if err != nil {
return err return err
} }
@ -153,7 +162,7 @@ func (w *baseLayerWriter) Close() error {
return err return err
} }
err = ProcessBaseLayer(w.root.Name()) err = ProcessBaseLayer(w.ctx, w.root.Name())
if err != nil { if err != nil {
return err return err
} }
@ -163,7 +172,7 @@ func (w *baseLayerWriter) Close() error {
if err != nil { if err != nil {
return err return err
} }
err = ProcessUtilityVMImage(filepath.Join(w.root.Name(), "UtilityVM")) err = ProcessUtilityVMImage(w.ctx, filepath.Join(w.root.Name(), "UtilityVM"))
if err != nil { if err != nil {
return err return err
} }

View File

@ -1,27 +1,23 @@
package wclayer package wclayer
import ( import (
"context"
"github.com/Microsoft/hcsshim/internal/hcserror" "github.com/Microsoft/hcsshim/internal/hcserror"
"github.com/sirupsen/logrus" "github.com/Microsoft/hcsshim/internal/oc"
"go.opencensus.io/trace"
) )
// CreateLayer creates a new, empty, read-only layer on the filesystem based on // CreateLayer creates a new, empty, read-only layer on the filesystem based on
// the parent layer provided. // the parent layer provided.
func CreateLayer(path, parent string) (err error) { func CreateLayer(ctx context.Context, path, parent string) (err error) {
title := "hcsshim::CreateLayer" title := "hcsshim::CreateLayer"
fields := logrus.Fields{ ctx, span := trace.StartSpan(ctx, title)
"parent": parent, defer span.End()
"path": path, defer func() { oc.SetSpanStatus(span, err) }()
} span.AddAttributes(
logrus.WithFields(fields).Debug(title) trace.StringAttribute("path", path),
defer func() { trace.StringAttribute("parent", parent))
if err != nil {
fields[logrus.ErrorKey] = err
logrus.WithFields(fields).Error(err)
} else {
logrus.WithFields(fields).Debug(title + " - succeeded")
}
}()
err = createLayer(&stdDriverInfo, path, parent) err = createLayer(&stdDriverInfo, path, parent)
if err != nil { if err != nil {

View File

@ -1,31 +1,29 @@
package wclayer package wclayer
import ( import (
"context"
"strings"
"github.com/Microsoft/hcsshim/internal/hcserror" "github.com/Microsoft/hcsshim/internal/hcserror"
"github.com/sirupsen/logrus" "github.com/Microsoft/hcsshim/internal/oc"
"go.opencensus.io/trace"
) )
// CreateScratchLayer creates and populates new read-write layer for use by a container. // CreateScratchLayer creates and populates new read-write layer for use by a container.
// This requires both the id of the direct parent layer, as well as the full list // This requires both the id of the direct parent layer, as well as the full list
// of paths to all parent layers up to the base (and including the direct parent // of paths to all parent layers up to the base (and including the direct parent
// whose id was provided). // whose id was provided).
func CreateScratchLayer(path string, parentLayerPaths []string) (err error) { func CreateScratchLayer(ctx context.Context, path string, parentLayerPaths []string) (err error) {
title := "hcsshim::CreateScratchLayer" title := "hcsshim::CreateScratchLayer"
fields := logrus.Fields{ ctx, span := trace.StartSpan(ctx, title)
"path": path, defer span.End()
} defer func() { oc.SetSpanStatus(span, err) }()
logrus.WithFields(fields).Debug(title) span.AddAttributes(
defer func() { trace.StringAttribute("path", path),
if err != nil { trace.StringAttribute("parentLayerPaths", strings.Join(parentLayerPaths, ", ")))
fields[logrus.ErrorKey] = err
logrus.WithFields(fields).Error(err)
} else {
logrus.WithFields(fields).Debug(title + " - succeeded")
}
}()
// Generate layer descriptors // Generate layer descriptors
layers, err := layerPathsToDescriptors(parentLayerPaths) layers, err := layerPathsToDescriptors(ctx, parentLayerPaths)
if err != nil { if err != nil {
return err return err
} }

View File

@ -1,25 +1,20 @@
package wclayer package wclayer
import ( import (
"context"
"github.com/Microsoft/hcsshim/internal/hcserror" "github.com/Microsoft/hcsshim/internal/hcserror"
"github.com/sirupsen/logrus" "github.com/Microsoft/hcsshim/internal/oc"
"go.opencensus.io/trace"
) )
// DeactivateLayer will dismount a layer that was mounted via ActivateLayer. // DeactivateLayer will dismount a layer that was mounted via ActivateLayer.
func DeactivateLayer(path string) (err error) { func DeactivateLayer(ctx context.Context, path string) (err error) {
title := "hcsshim::DeactivateLayer" title := "hcsshim::DeactivateLayer"
fields := logrus.Fields{ ctx, span := trace.StartSpan(ctx, title)
"path": path, defer span.End()
} defer func() { oc.SetSpanStatus(span, err) }()
logrus.WithFields(fields).Debug(title) span.AddAttributes(trace.StringAttribute("path", path))
defer func() {
if err != nil {
fields[logrus.ErrorKey] = err
logrus.WithFields(fields).Error(err)
} else {
logrus.WithFields(fields).Debug(title + " - succeeded")
}
}()
err = deactivateLayer(&stdDriverInfo, path) err = deactivateLayer(&stdDriverInfo, path)
if err != nil { if err != nil {

View File

@ -1,26 +1,21 @@
package wclayer package wclayer
import ( import (
"context"
"github.com/Microsoft/hcsshim/internal/hcserror" "github.com/Microsoft/hcsshim/internal/hcserror"
"github.com/sirupsen/logrus" "github.com/Microsoft/hcsshim/internal/oc"
"go.opencensus.io/trace"
) )
// DestroyLayer will remove the on-disk files representing the layer with the given // DestroyLayer will remove the on-disk files representing the layer with the given
// path, including that layer's containing folder, if any. // path, including that layer's containing folder, if any.
func DestroyLayer(path string) (err error) { func DestroyLayer(ctx context.Context, path string) (err error) {
title := "hcsshim::DestroyLayer" title := "hcsshim::DestroyLayer"
fields := logrus.Fields{ ctx, span := trace.StartSpan(ctx, title)
"path": path, defer span.End()
} defer func() { oc.SetSpanStatus(span, err) }()
logrus.WithFields(fields).Debug(title) span.AddAttributes(trace.StringAttribute("path", path))
defer func() {
if err != nil {
fields[logrus.ErrorKey] = err
logrus.WithFields(fields).Error(err)
} else {
logrus.WithFields(fields).Debug(title + " - succeeded")
}
}()
err = destroyLayer(&stdDriverInfo, path) err = destroyLayer(&stdDriverInfo, path)
if err != nil { if err != nil {

View File

@ -1,32 +1,27 @@
package wclayer package wclayer
import ( import (
"context"
"os" "os"
"path/filepath" "path/filepath"
"syscall" "syscall"
"unsafe" "unsafe"
"github.com/Microsoft/hcsshim/internal/hcserror" "github.com/Microsoft/hcsshim/internal/hcserror"
"github.com/Microsoft/hcsshim/internal/oc"
"github.com/Microsoft/hcsshim/osversion" "github.com/Microsoft/hcsshim/osversion"
"github.com/sirupsen/logrus" "go.opencensus.io/trace"
) )
// ExpandScratchSize expands the size of a layer to at least size bytes. // ExpandScratchSize expands the size of a layer to at least size bytes.
func ExpandScratchSize(path string, size uint64) (err error) { func ExpandScratchSize(ctx context.Context, path string, size uint64) (err error) {
title := "hcsshim::ExpandScratchSize" title := "hcsshim::ExpandScratchSize"
fields := logrus.Fields{ ctx, span := trace.StartSpan(ctx, title)
"path": path, defer span.End()
"size": size, defer func() { oc.SetSpanStatus(span, err) }()
} span.AddAttributes(
logrus.WithFields(fields).Debug(title) trace.StringAttribute("path", path),
defer func() { trace.Int64Attribute("size", int64(size)))
if err != nil {
fields[logrus.ErrorKey] = err
logrus.WithFields(fields).Error(err)
} else {
logrus.WithFields(fields).Debug(title + " - succeeded")
}
}()
err = expandSandboxSize(&stdDriverInfo, path, size) err = expandSandboxSize(&stdDriverInfo, path, size)
if err != nil { if err != nil {
@ -36,7 +31,7 @@ func ExpandScratchSize(path string, size uint64) (err error) {
// Manually expand the volume now in order to work around bugs in 19H1 and // Manually expand the volume now in order to work around bugs in 19H1 and
// prerelease versions of Vb. Remove once this is fixed in Windows. // prerelease versions of Vb. Remove once this is fixed in Windows.
if build := osversion.Get().Build; build >= osversion.V19H1 && build < 19020 { if build := osversion.Get().Build; build >= osversion.V19H1 && build < 19020 {
err = expandSandboxVolume(path) err = expandSandboxVolume(ctx, path)
if err != nil { if err != nil {
return err return err
} }
@ -84,7 +79,7 @@ func attachVhd(path string) (syscall.Handle, error) {
return handle, nil return handle, nil
} }
func expandSandboxVolume(path string) error { func expandSandboxVolume(ctx context.Context, path string) error {
// Mount the sandbox VHD temporarily. // Mount the sandbox VHD temporarily.
vhdPath := filepath.Join(path, "sandbox.vhdx") vhdPath := filepath.Join(path, "sandbox.vhdx")
vhd, err := attachVhd(vhdPath) vhd, err := attachVhd(vhdPath)
@ -94,7 +89,7 @@ func expandSandboxVolume(path string) error {
defer syscall.Close(vhd) defer syscall.Close(vhd)
// Open the volume. // Open the volume.
volumePath, err := GetLayerMountPath(path) volumePath, err := GetLayerMountPath(ctx, path)
if err != nil { if err != nil {
return err return err
} }

View File

@ -1,12 +1,15 @@
package wclayer package wclayer
import ( import (
"context"
"io/ioutil" "io/ioutil"
"os" "os"
"strings"
"github.com/Microsoft/go-winio" "github.com/Microsoft/go-winio"
"github.com/Microsoft/hcsshim/internal/hcserror" "github.com/Microsoft/hcsshim/internal/hcserror"
"github.com/sirupsen/logrus" "github.com/Microsoft/hcsshim/internal/oc"
"go.opencensus.io/trace"
) )
// ExportLayer will create a folder at exportFolderPath and fill that folder with // ExportLayer will create a folder at exportFolderPath and fill that folder with
@ -14,24 +17,18 @@ import (
// format includes any metadata required for later importing the layer (using // format includes any metadata required for later importing the layer (using
// ImportLayer), and requires the full list of parent layer paths in order to // ImportLayer), and requires the full list of parent layer paths in order to
// perform the export. // perform the export.
func ExportLayer(path string, exportFolderPath string, parentLayerPaths []string) (err error) { func ExportLayer(ctx context.Context, path string, exportFolderPath string, parentLayerPaths []string) (err error) {
title := "hcsshim::ExportLayer" title := "hcsshim::ExportLayer"
fields := logrus.Fields{ ctx, span := trace.StartSpan(ctx, title)
"path": path, defer span.End()
"exportFolderPath": exportFolderPath, defer func() { oc.SetSpanStatus(span, err) }()
} span.AddAttributes(
logrus.WithFields(fields).Debug(title) trace.StringAttribute("path", path),
defer func() { trace.StringAttribute("exportFolderPath", exportFolderPath),
if err != nil { trace.StringAttribute("parentLayerPaths", strings.Join(parentLayerPaths, ", ")))
fields[logrus.ErrorKey] = err
logrus.WithFields(fields).Error(err)
} else {
logrus.WithFields(fields).Debug(title + " - succeeded")
}
}()
// Generate layer descriptors // Generate layer descriptors
layers, err := layerPathsToDescriptors(parentLayerPaths) layers, err := layerPathsToDescriptors(ctx, parentLayerPaths)
if err != nil { if err != nil {
return err return err
} }
@ -52,25 +49,46 @@ type LayerReader interface {
// NewLayerReader returns a new layer reader for reading the contents of an on-disk layer. // NewLayerReader returns a new layer reader for reading the contents of an on-disk layer.
// The caller must have taken the SeBackupPrivilege privilege // The caller must have taken the SeBackupPrivilege privilege
// to call this and any methods on the resulting LayerReader. // to call this and any methods on the resulting LayerReader.
func NewLayerReader(path string, parentLayerPaths []string) (LayerReader, error) { func NewLayerReader(ctx context.Context, path string, parentLayerPaths []string) (_ LayerReader, err error) {
ctx, span := trace.StartSpan(ctx, "hcsshim::NewLayerReader")
defer func() {
if err != nil {
oc.SetSpanStatus(span, err)
span.End()
}
}()
span.AddAttributes(
trace.StringAttribute("path", path),
trace.StringAttribute("parentLayerPaths", strings.Join(parentLayerPaths, ", ")))
exportPath, err := ioutil.TempDir("", "hcs") exportPath, err := ioutil.TempDir("", "hcs")
if err != nil { if err != nil {
return nil, err return nil, err
} }
err = ExportLayer(path, exportPath, parentLayerPaths) err = ExportLayer(ctx, path, exportPath, parentLayerPaths)
if err != nil { if err != nil {
os.RemoveAll(exportPath) os.RemoveAll(exportPath)
return nil, err return nil, err
} }
return &legacyLayerReaderWrapper{newLegacyLayerReader(exportPath)}, nil return &legacyLayerReaderWrapper{
ctx: ctx,
s: span,
legacyLayerReader: newLegacyLayerReader(exportPath),
}, nil
} }
type legacyLayerReaderWrapper struct { type legacyLayerReaderWrapper struct {
ctx context.Context
s *trace.Span
*legacyLayerReader *legacyLayerReader
} }
func (r *legacyLayerReaderWrapper) Close() error { func (r *legacyLayerReaderWrapper) Close() (err error) {
err := r.legacyLayerReader.Close() defer r.s.End()
defer func() { oc.SetSpanStatus(r.s, err) }()
err = r.legacyLayerReader.Close()
os.RemoveAll(r.root) os.RemoveAll(r.root)
return err return err
} }

View File

@ -1,36 +1,31 @@
package wclayer package wclayer
import ( import (
"context"
"syscall" "syscall"
"github.com/Microsoft/hcsshim/internal/hcserror" "github.com/Microsoft/hcsshim/internal/hcserror"
"github.com/sirupsen/logrus" "github.com/Microsoft/hcsshim/internal/log"
"github.com/Microsoft/hcsshim/internal/oc"
"go.opencensus.io/trace"
) )
// GetLayerMountPath will look for a mounted layer with the given path and return // GetLayerMountPath will look for a mounted layer with the given path and return
// the path at which that layer can be accessed. This path may be a volume path // the path at which that layer can be accessed. This path may be a volume path
// if the layer is a mounted read-write layer, otherwise it is expected to be the // if the layer is a mounted read-write layer, otherwise it is expected to be the
// folder path at which the layer is stored. // folder path at which the layer is stored.
func GetLayerMountPath(path string) (_ string, err error) { func GetLayerMountPath(ctx context.Context, path string) (_ string, err error) {
title := "hcsshim::GetLayerMountPath" title := "hcsshim::GetLayerMountPath"
fields := logrus.Fields{ ctx, span := trace.StartSpan(ctx, title)
"path": path, defer span.End()
} defer func() { oc.SetSpanStatus(span, err) }()
logrus.WithFields(fields).Debug(title) span.AddAttributes(trace.StringAttribute("path", path))
defer func() {
if err != nil {
fields[logrus.ErrorKey] = err
logrus.WithFields(fields).Error(err)
} else {
logrus.WithFields(fields).Debug(title + " - succeeded")
}
}()
var mountPathLength uintptr var mountPathLength uintptr
mountPathLength = 0 mountPathLength = 0
// Call the procedure itself. // Call the procedure itself.
logrus.WithFields(fields).Debug("Calling proc (1)") log.G(ctx).Debug("Calling proc (1)")
err = getLayerMountPath(&stdDriverInfo, path, &mountPathLength, nil) err = getLayerMountPath(&stdDriverInfo, path, &mountPathLength, nil)
if err != nil { if err != nil {
return "", hcserror.New(err, title+" - failed", "(first call)") return "", hcserror.New(err, title+" - failed", "(first call)")
@ -44,13 +39,13 @@ func GetLayerMountPath(path string) (_ string, err error) {
mountPathp[0] = 0 mountPathp[0] = 0
// Call the procedure again // Call the procedure again
logrus.WithFields(fields).Debug("Calling proc (2)") log.G(ctx).Debug("Calling proc (2)")
err = getLayerMountPath(&stdDriverInfo, path, &mountPathLength, &mountPathp[0]) err = getLayerMountPath(&stdDriverInfo, path, &mountPathLength, &mountPathp[0])
if err != nil { if err != nil {
return "", hcserror.New(err, title+" - failed", "(second call)") return "", hcserror.New(err, title+" - failed", "(second call)")
} }
mountPath := syscall.UTF16ToString(mountPathp[0:]) mountPath := syscall.UTF16ToString(mountPathp[0:])
fields["mountPath"] = mountPath span.AddAttributes(trace.StringAttribute("mountPath", mountPath))
return mountPath, nil return mountPath, nil
} }

View File

@ -1,29 +1,29 @@
package wclayer package wclayer
import ( import (
"context"
"github.com/Microsoft/hcsshim/internal/hcserror" "github.com/Microsoft/hcsshim/internal/hcserror"
"github.com/Microsoft/hcsshim/internal/interop" "github.com/Microsoft/hcsshim/internal/interop"
"github.com/sirupsen/logrus" "github.com/Microsoft/hcsshim/internal/oc"
"go.opencensus.io/trace"
) )
// GetSharedBaseImages will enumerate the images stored in the common central // GetSharedBaseImages will enumerate the images stored in the common central
// image store and return descriptive info about those images for the purpose // image store and return descriptive info about those images for the purpose
// of registering them with the graphdriver, graph, and tagstore. // of registering them with the graphdriver, graph, and tagstore.
func GetSharedBaseImages() (imageData string, err error) { func GetSharedBaseImages(ctx context.Context) (_ string, err error) {
title := "hcsshim::GetSharedBaseImages" title := "hcsshim::GetSharedBaseImages"
logrus.Debug(title) ctx, span := trace.StartSpan(ctx, title)
defer func() { defer span.End()
if err != nil { defer func() { oc.SetSpanStatus(span, err) }()
logrus.WithError(err).Error(err)
} else {
logrus.WithField("imageData", imageData).Debug(title + " - succeeded")
}
}()
var buffer *uint16 var buffer *uint16
err = getBaseImages(&buffer) err = getBaseImages(&buffer)
if err != nil { if err != nil {
return "", hcserror.New(err, title+" - failed", "") return "", hcserror.New(err, title+" - failed", "")
} }
return interop.ConvertAndFreeCoTaskMemString(buffer), nil imageData := interop.ConvertAndFreeCoTaskMemString(buffer)
span.AddAttributes(trace.StringAttribute("imageData", imageData))
return imageData, nil
} }

View File

@ -1,26 +1,22 @@
package wclayer package wclayer
import ( import (
"context"
"github.com/Microsoft/hcsshim/internal/hcserror" "github.com/Microsoft/hcsshim/internal/hcserror"
"github.com/sirupsen/logrus" "github.com/Microsoft/hcsshim/internal/oc"
"go.opencensus.io/trace"
) )
// GrantVmAccess adds access to a file for a given VM // GrantVmAccess adds access to a file for a given VM
func GrantVmAccess(vmid string, filepath string) (err error) { func GrantVmAccess(ctx context.Context, vmid string, filepath string) (err error) {
title := "hcsshim::GrantVmAccess" title := "hcsshim::GrantVmAccess"
fields := logrus.Fields{ ctx, span := trace.StartSpan(ctx, title)
"vm-id": vmid, defer span.End()
"path": filepath, defer func() { oc.SetSpanStatus(span, err) }()
} span.AddAttributes(
logrus.WithFields(fields).Debug(title) trace.StringAttribute("vm-id", vmid),
defer func() { trace.StringAttribute("path", filepath))
if err != nil {
fields[logrus.ErrorKey] = err
logrus.WithFields(fields).Error(err)
} else {
logrus.WithFields(fields).Debug(title + " - succeeded")
}
}()
err = grantVmAccess(vmid, filepath) err = grantVmAccess(vmid, filepath)
if err != nil { if err != nil {

View File

@ -1,38 +1,35 @@
package wclayer package wclayer
import ( import (
"context"
"io/ioutil" "io/ioutil"
"os" "os"
"path/filepath" "path/filepath"
"strings"
"github.com/Microsoft/go-winio" "github.com/Microsoft/go-winio"
"github.com/Microsoft/hcsshim/internal/hcserror" "github.com/Microsoft/hcsshim/internal/hcserror"
"github.com/Microsoft/hcsshim/internal/oc"
"github.com/Microsoft/hcsshim/internal/safefile" "github.com/Microsoft/hcsshim/internal/safefile"
"github.com/sirupsen/logrus" "go.opencensus.io/trace"
) )
// ImportLayer will take the contents of the folder at importFolderPath and import // ImportLayer will take the contents of the folder at importFolderPath and import
// that into a layer with the id layerId. Note that in order to correctly populate // that into a layer with the id layerId. Note that in order to correctly populate
// the layer and interperet the transport format, all parent layers must already // the layer and interperet the transport format, all parent layers must already
// be present on the system at the paths provided in parentLayerPaths. // be present on the system at the paths provided in parentLayerPaths.
func ImportLayer(path string, importFolderPath string, parentLayerPaths []string) (err error) { func ImportLayer(ctx context.Context, path string, importFolderPath string, parentLayerPaths []string) (err error) {
title := "hcsshim::ImportLayer" title := "hcsshim::ImportLayer"
fields := logrus.Fields{ ctx, span := trace.StartSpan(ctx, title)
"path": path, defer span.End()
"importFolderPath": importFolderPath, defer func() { oc.SetSpanStatus(span, err) }()
} span.AddAttributes(
logrus.WithFields(fields).Debug(title) trace.StringAttribute("path", path),
defer func() { trace.StringAttribute("importFolderPath", importFolderPath),
if err != nil { trace.StringAttribute("parentLayerPaths", strings.Join(parentLayerPaths, ", ")))
fields[logrus.ErrorKey] = err
logrus.WithFields(fields).Error(err)
} else {
logrus.WithFields(fields).Debug(title + " - succeeded")
}
}()
// Generate layer descriptors // Generate layer descriptors
layers, err := layerPathsToDescriptors(parentLayerPaths) layers, err := layerPathsToDescriptors(ctx, parentLayerPaths)
if err != nil { if err != nil {
return err return err
} }
@ -60,20 +57,26 @@ type LayerWriter interface {
} }
type legacyLayerWriterWrapper struct { type legacyLayerWriterWrapper struct {
ctx context.Context
s *trace.Span
*legacyLayerWriter *legacyLayerWriter
path string path string
parentLayerPaths []string parentLayerPaths []string
} }
func (r *legacyLayerWriterWrapper) Close() error { func (r *legacyLayerWriterWrapper) Close() (err error) {
defer r.s.End()
defer func() { oc.SetSpanStatus(r.s, err) }()
defer os.RemoveAll(r.root.Name()) defer os.RemoveAll(r.root.Name())
defer r.legacyLayerWriter.CloseRoots() defer r.legacyLayerWriter.CloseRoots()
err := r.legacyLayerWriter.Close()
err = r.legacyLayerWriter.Close()
if err != nil { if err != nil {
return err return err
} }
if err = ImportLayer(r.destRoot.Name(), r.path, r.parentLayerPaths); err != nil { if err = ImportLayer(r.ctx, r.destRoot.Name(), r.path, r.parentLayerPaths); err != nil {
return err return err
} }
for _, name := range r.Tombstones { for _, name := range r.Tombstones {
@ -96,7 +99,7 @@ func (r *legacyLayerWriterWrapper) Close() error {
if err != nil { if err != nil {
return err return err
} }
err = ProcessUtilityVMImage(filepath.Join(r.destRoot.Name(), "UtilityVM")) err = ProcessUtilityVMImage(r.ctx, filepath.Join(r.destRoot.Name(), "UtilityVM"))
if err != nil { if err != nil {
return err return err
} }
@ -107,7 +110,18 @@ func (r *legacyLayerWriterWrapper) Close() error {
// NewLayerWriter returns a new layer writer for creating a layer on disk. // NewLayerWriter returns a new layer writer for creating a layer on disk.
// The caller must have taken the SeBackupPrivilege and SeRestorePrivilege privileges // The caller must have taken the SeBackupPrivilege and SeRestorePrivilege privileges
// to call this and any methods on the resulting LayerWriter. // to call this and any methods on the resulting LayerWriter.
func NewLayerWriter(path string, parentLayerPaths []string) (LayerWriter, error) { func NewLayerWriter(ctx context.Context, path string, parentLayerPaths []string) (_ LayerWriter, err error) {
ctx, span := trace.StartSpan(ctx, "hcsshim::NewLayerWriter")
defer func() {
if err != nil {
oc.SetSpanStatus(span, err)
span.End()
}
}()
span.AddAttributes(
trace.StringAttribute("path", path),
trace.StringAttribute("parentLayerPaths", strings.Join(parentLayerPaths, ", ")))
if len(parentLayerPaths) == 0 { if len(parentLayerPaths) == 0 {
// This is a base layer. It gets imported differently. // This is a base layer. It gets imported differently.
f, err := safefile.OpenRoot(path) f, err := safefile.OpenRoot(path)
@ -115,6 +129,8 @@ func NewLayerWriter(path string, parentLayerPaths []string) (LayerWriter, error)
return nil, err return nil, err
} }
return &baseLayerWriter{ return &baseLayerWriter{
ctx: ctx,
s: span,
root: f, root: f,
}, nil }, nil
} }
@ -128,6 +144,8 @@ func NewLayerWriter(path string, parentLayerPaths []string) (LayerWriter, error)
return nil, err return nil, err
} }
return &legacyLayerWriterWrapper{ return &legacyLayerWriterWrapper{
ctx: ctx,
s: span,
legacyLayerWriter: w, legacyLayerWriter: w,
path: importPath, path: importPath,
parentLayerPaths: parentLayerPaths, parentLayerPaths: parentLayerPaths,

View File

@ -1,26 +1,21 @@
package wclayer package wclayer
import ( import (
"context"
"github.com/Microsoft/hcsshim/internal/hcserror" "github.com/Microsoft/hcsshim/internal/hcserror"
"github.com/sirupsen/logrus" "github.com/Microsoft/hcsshim/internal/oc"
"go.opencensus.io/trace"
) )
// LayerExists will return true if a layer with the given id exists and is known // LayerExists will return true if a layer with the given id exists and is known
// to the system. // to the system.
func LayerExists(path string) (_ bool, err error) { func LayerExists(ctx context.Context, path string) (_ bool, err error) {
title := "hcsshim::LayerExists" title := "hcsshim::LayerExists"
fields := logrus.Fields{ ctx, span := trace.StartSpan(ctx, title)
"path": path, defer span.End()
} defer func() { oc.SetSpanStatus(span, err) }()
logrus.WithFields(fields).Debug(title) span.AddAttributes(trace.StringAttribute("path", path))
defer func() {
if err != nil {
fields[logrus.ErrorKey] = err
logrus.WithFields(fields).Error(err)
} else {
logrus.WithFields(fields).Debug(title + " - succeeded")
}
}()
// Call the procedure itself. // Call the procedure itself.
var exists uint32 var exists uint32
@ -28,6 +23,6 @@ func LayerExists(path string) (_ bool, err error) {
if err != nil { if err != nil {
return false, hcserror.New(err, title+" - failed", "") return false, hcserror.New(err, title+" - failed", "")
} }
fields["layer-exists"] = exists != 0 span.AddAttributes(trace.BoolAttribute("layer-exists", exists != 0))
return exists != 0, nil return exists != 0, nil
} }

View File

@ -1,13 +1,22 @@
package wclayer package wclayer
import ( import (
"context"
"path/filepath" "path/filepath"
"github.com/Microsoft/go-winio/pkg/guid" "github.com/Microsoft/go-winio/pkg/guid"
"github.com/Microsoft/hcsshim/internal/oc"
"go.opencensus.io/trace"
) )
// LayerID returns the layer ID of a layer on disk. // LayerID returns the layer ID of a layer on disk.
func LayerID(path string) (guid.GUID, error) { func LayerID(ctx context.Context, path string) (_ guid.GUID, err error) {
title := "hcsshim::LayerID"
ctx, span := trace.StartSpan(ctx, title)
defer span.End()
defer func() { oc.SetSpanStatus(span, err) }()
span.AddAttributes(trace.StringAttribute("path", path))
_, file := filepath.Split(path) _, file := filepath.Split(path)
return NameToGuid(file) return NameToGuid(ctx, file)
} }

View File

@ -4,6 +4,7 @@ package wclayer
// functionality. // functionality.
import ( import (
"context"
"syscall" "syscall"
"github.com/Microsoft/go-winio/pkg/guid" "github.com/Microsoft/go-winio/pkg/guid"
@ -68,12 +69,12 @@ type WC_LAYER_DESCRIPTOR struct {
Pathp *uint16 Pathp *uint16
} }
func layerPathsToDescriptors(parentLayerPaths []string) ([]WC_LAYER_DESCRIPTOR, error) { func layerPathsToDescriptors(ctx context.Context, parentLayerPaths []string) ([]WC_LAYER_DESCRIPTOR, error) {
// Array of descriptors that gets constructed. // Array of descriptors that gets constructed.
var layers []WC_LAYER_DESCRIPTOR var layers []WC_LAYER_DESCRIPTOR
for i := 0; i < len(parentLayerPaths); i++ { for i := 0; i < len(parentLayerPaths); i++ {
g, err := LayerID(parentLayerPaths[i]) g, err := LayerID(ctx, parentLayerPaths[i])
if err != nil { if err != nil {
logrus.WithError(err).Debug("Failed to convert name to guid") logrus.WithError(err).Debug("Failed to convert name to guid")
return nil, err return nil, err

View File

@ -1,34 +1,29 @@
package wclayer package wclayer
import ( import (
"context"
"github.com/Microsoft/go-winio/pkg/guid" "github.com/Microsoft/go-winio/pkg/guid"
"github.com/Microsoft/hcsshim/internal/hcserror" "github.com/Microsoft/hcsshim/internal/hcserror"
"github.com/sirupsen/logrus" "github.com/Microsoft/hcsshim/internal/oc"
"go.opencensus.io/trace"
) )
// NameToGuid converts the given string into a GUID using the algorithm in the // NameToGuid converts the given string into a GUID using the algorithm in the
// Host Compute Service, ensuring GUIDs generated with the same string are common // Host Compute Service, ensuring GUIDs generated with the same string are common
// across all clients. // across all clients.
func NameToGuid(name string) (id guid.GUID, err error) { func NameToGuid(ctx context.Context, name string) (_ guid.GUID, err error) {
title := "hcsshim::NameToGuid" title := "hcsshim::NameToGuid"
fields := logrus.Fields{ ctx, span := trace.StartSpan(ctx, title)
"name": name, defer span.End()
} defer func() { oc.SetSpanStatus(span, err) }()
logrus.WithFields(fields).Debug(title) span.AddAttributes(trace.StringAttribute("name", name))
defer func() {
if err != nil {
fields[logrus.ErrorKey] = err
logrus.WithFields(fields).Error(err)
} else {
logrus.WithFields(fields).Debug(title + " - succeeded")
}
}()
var id guid.GUID
err = nameToGuid(name, &id) err = nameToGuid(name, &id)
if err != nil { if err != nil {
err = hcserror.New(err, title+" - failed", "") return guid.GUID{}, hcserror.New(err, title+" - failed", "")
return
} }
fields["guid"] = id.String() span.AddAttributes(trace.StringAttribute("guid", id.String()))
return return id, nil
} }

View File

@ -1,10 +1,13 @@
package wclayer package wclayer
import ( import (
"context"
"strings"
"sync" "sync"
"github.com/Microsoft/hcsshim/internal/hcserror" "github.com/Microsoft/hcsshim/internal/hcserror"
"github.com/sirupsen/logrus" "github.com/Microsoft/hcsshim/internal/oc"
"go.opencensus.io/trace"
) )
var prepareLayerLock sync.Mutex var prepareLayerLock sync.Mutex
@ -14,23 +17,17 @@ var prepareLayerLock sync.Mutex
// parent layers, and is necessary in order to view or interact with the layer // parent layers, and is necessary in order to view or interact with the layer
// as an actual filesystem (reading and writing files, creating directories, etc). // as an actual filesystem (reading and writing files, creating directories, etc).
// Disabling the filter must be done via UnprepareLayer. // Disabling the filter must be done via UnprepareLayer.
func PrepareLayer(path string, parentLayerPaths []string) (err error) { func PrepareLayer(ctx context.Context, path string, parentLayerPaths []string) (err error) {
title := "hcsshim::PrepareLayer" title := "hcsshim::PrepareLayer"
fields := logrus.Fields{ ctx, span := trace.StartSpan(ctx, title)
"path": path, defer span.End()
} defer func() { oc.SetSpanStatus(span, err) }()
logrus.WithFields(fields).Debug(title) span.AddAttributes(
defer func() { trace.StringAttribute("path", path),
if err != nil { trace.StringAttribute("parentLayerPaths", strings.Join(parentLayerPaths, ", ")))
fields[logrus.ErrorKey] = err
logrus.WithFields(fields).Error(err)
} else {
logrus.WithFields(fields).Debug(title + " - succeeded")
}
}()
// Generate layer descriptors // Generate layer descriptors
layers, err := layerPathsToDescriptors(parentLayerPaths) layers, err := layerPathsToDescriptors(ctx, parentLayerPaths)
if err != nil { if err != nil {
return err return err
} }

View File

@ -1,23 +1,41 @@
package wclayer package wclayer
import "os" import (
"context"
"os"
"github.com/Microsoft/hcsshim/internal/oc"
"go.opencensus.io/trace"
)
// ProcessBaseLayer post-processes a base layer that has had its files extracted. // ProcessBaseLayer post-processes a base layer that has had its files extracted.
// The files should have been extracted to <path>\Files. // The files should have been extracted to <path>\Files.
func ProcessBaseLayer(path string) error { func ProcessBaseLayer(ctx context.Context, path string) (err error) {
err := processBaseImage(path) title := "hcsshim::ProcessBaseLayer"
ctx, span := trace.StartSpan(ctx, title)
defer span.End()
defer func() { oc.SetSpanStatus(span, err) }()
span.AddAttributes(trace.StringAttribute("path", path))
err = processBaseImage(path)
if err != nil { if err != nil {
return &os.PathError{Op: "ProcessBaseLayer", Path: path, Err: err} return &os.PathError{Op: title, Path: path, Err: err}
} }
return nil return nil
} }
// ProcessUtilityVMImage post-processes a utility VM image that has had its files extracted. // ProcessUtilityVMImage post-processes a utility VM image that has had its files extracted.
// The files should have been extracted to <path>\Files. // The files should have been extracted to <path>\Files.
func ProcessUtilityVMImage(path string) error { func ProcessUtilityVMImage(ctx context.Context, path string) (err error) {
err := processUtilityImage(path) title := "hcsshim::ProcessUtilityVMImage"
ctx, span := trace.StartSpan(ctx, title)
defer span.End()
defer func() { oc.SetSpanStatus(span, err) }()
span.AddAttributes(trace.StringAttribute("path", path))
err = processUtilityImage(path)
if err != nil { if err != nil {
return &os.PathError{Op: "ProcessUtilityVMImage", Path: path, Err: err} return &os.PathError{Op: title, Path: path, Err: err}
} }
return nil return nil
} }

View File

@ -1,26 +1,21 @@
package wclayer package wclayer
import ( import (
"context"
"github.com/Microsoft/hcsshim/internal/hcserror" "github.com/Microsoft/hcsshim/internal/hcserror"
"github.com/sirupsen/logrus" "github.com/Microsoft/hcsshim/internal/oc"
"go.opencensus.io/trace"
) )
// UnprepareLayer disables the filesystem filter for the read-write layer with // UnprepareLayer disables the filesystem filter for the read-write layer with
// the given id. // the given id.
func UnprepareLayer(path string) (err error) { func UnprepareLayer(ctx context.Context, path string) (err error) {
title := "hcsshim::UnprepareLayer" title := "hcsshim::UnprepareLayer"
fields := logrus.Fields{ ctx, span := trace.StartSpan(ctx, title)
"path": path, defer span.End()
} defer func() { oc.SetSpanStatus(span, err) }()
logrus.WithFields(fields).Debug(title) span.AddAttributes(trace.StringAttribute("path", path))
defer func() {
if err != nil {
fields[logrus.ErrorKey] = err
logrus.WithFields(fields).Error(err)
} else {
logrus.WithFields(fields).Debug(title + " - succeeded")
}
}()
err = unprepareLayer(&stdDriverInfo, path) err = unprepareLayer(&stdDriverInfo, path)
if err != nil { if err != nil {

View File

@ -1,6 +1,7 @@
package hcsshim package hcsshim
import ( import (
"context"
"crypto/sha1" "crypto/sha1"
"path/filepath" "path/filepath"
@ -13,59 +14,59 @@ func layerPath(info *DriverInfo, id string) string {
} }
func ActivateLayer(info DriverInfo, id string) error { func ActivateLayer(info DriverInfo, id string) error {
return wclayer.ActivateLayer(layerPath(&info, id)) return wclayer.ActivateLayer(context.Background(), layerPath(&info, id))
} }
func CreateLayer(info DriverInfo, id, parent string) error { func CreateLayer(info DriverInfo, id, parent string) error {
return wclayer.CreateLayer(layerPath(&info, id), parent) return wclayer.CreateLayer(context.Background(), layerPath(&info, id), parent)
} }
// New clients should use CreateScratchLayer instead. Kept in to preserve API compatibility. // New clients should use CreateScratchLayer instead. Kept in to preserve API compatibility.
func CreateSandboxLayer(info DriverInfo, layerId, parentId string, parentLayerPaths []string) error { func CreateSandboxLayer(info DriverInfo, layerId, parentId string, parentLayerPaths []string) error {
return wclayer.CreateScratchLayer(layerPath(&info, layerId), parentLayerPaths) return wclayer.CreateScratchLayer(context.Background(), layerPath(&info, layerId), parentLayerPaths)
} }
func CreateScratchLayer(info DriverInfo, layerId, parentId string, parentLayerPaths []string) error { func CreateScratchLayer(info DriverInfo, layerId, parentId string, parentLayerPaths []string) error {
return wclayer.CreateScratchLayer(layerPath(&info, layerId), parentLayerPaths) return wclayer.CreateScratchLayer(context.Background(), layerPath(&info, layerId), parentLayerPaths)
} }
func DeactivateLayer(info DriverInfo, id string) error { func DeactivateLayer(info DriverInfo, id string) error {
return wclayer.DeactivateLayer(layerPath(&info, id)) return wclayer.DeactivateLayer(context.Background(), layerPath(&info, id))
} }
func DestroyLayer(info DriverInfo, id string) error { func DestroyLayer(info DriverInfo, id string) error {
return wclayer.DestroyLayer(layerPath(&info, id)) return wclayer.DestroyLayer(context.Background(), layerPath(&info, id))
} }
// New clients should use ExpandScratchSize instead. Kept in to preserve API compatibility. // New clients should use ExpandScratchSize instead. Kept in to preserve API compatibility.
func ExpandSandboxSize(info DriverInfo, layerId string, size uint64) error { func ExpandSandboxSize(info DriverInfo, layerId string, size uint64) error {
return wclayer.ExpandScratchSize(layerPath(&info, layerId), size) return wclayer.ExpandScratchSize(context.Background(), layerPath(&info, layerId), size)
} }
func ExpandScratchSize(info DriverInfo, layerId string, size uint64) error { func ExpandScratchSize(info DriverInfo, layerId string, size uint64) error {
return wclayer.ExpandScratchSize(layerPath(&info, layerId), size) return wclayer.ExpandScratchSize(context.Background(), layerPath(&info, layerId), size)
} }
func ExportLayer(info DriverInfo, layerId string, exportFolderPath string, parentLayerPaths []string) error { func ExportLayer(info DriverInfo, layerId string, exportFolderPath string, parentLayerPaths []string) error {
return wclayer.ExportLayer(layerPath(&info, layerId), exportFolderPath, parentLayerPaths) return wclayer.ExportLayer(context.Background(), layerPath(&info, layerId), exportFolderPath, parentLayerPaths)
} }
func GetLayerMountPath(info DriverInfo, id string) (string, error) { func GetLayerMountPath(info DriverInfo, id string) (string, error) {
return wclayer.GetLayerMountPath(layerPath(&info, id)) return wclayer.GetLayerMountPath(context.Background(), layerPath(&info, id))
} }
func GetSharedBaseImages() (imageData string, err error) { func GetSharedBaseImages() (imageData string, err error) {
return wclayer.GetSharedBaseImages() return wclayer.GetSharedBaseImages(context.Background())
} }
func ImportLayer(info DriverInfo, layerID string, importFolderPath string, parentLayerPaths []string) error { func ImportLayer(info DriverInfo, layerID string, importFolderPath string, parentLayerPaths []string) error {
return wclayer.ImportLayer(layerPath(&info, layerID), importFolderPath, parentLayerPaths) return wclayer.ImportLayer(context.Background(), layerPath(&info, layerID), importFolderPath, parentLayerPaths)
} }
func LayerExists(info DriverInfo, id string) (bool, error) { func LayerExists(info DriverInfo, id string) (bool, error) {
return wclayer.LayerExists(layerPath(&info, id)) return wclayer.LayerExists(context.Background(), layerPath(&info, id))
} }
func PrepareLayer(info DriverInfo, layerId string, parentLayerPaths []string) error { func PrepareLayer(info DriverInfo, layerId string, parentLayerPaths []string) error {
return wclayer.PrepareLayer(layerPath(&info, layerId), parentLayerPaths) return wclayer.PrepareLayer(context.Background(), layerPath(&info, layerId), parentLayerPaths)
} }
func ProcessBaseLayer(path string) error { func ProcessBaseLayer(path string) error {
return wclayer.ProcessBaseLayer(path) return wclayer.ProcessBaseLayer(context.Background(), path)
} }
func ProcessUtilityVMImage(path string) error { func ProcessUtilityVMImage(path string) error {
return wclayer.ProcessUtilityVMImage(path) return wclayer.ProcessUtilityVMImage(context.Background(), path)
} }
func UnprepareLayer(info DriverInfo, layerId string) error { func UnprepareLayer(info DriverInfo, layerId string) error {
return wclayer.UnprepareLayer(layerPath(&info, layerId)) return wclayer.UnprepareLayer(context.Background(), layerPath(&info, layerId))
} }
type DriverInfo struct { type DriverInfo struct {
@ -76,7 +77,7 @@ type DriverInfo struct {
type GUID [16]byte type GUID [16]byte
func NameToGuid(name string) (id GUID, err error) { func NameToGuid(name string) (id GUID, err error) {
g, err := wclayer.NameToGuid(name) g, err := wclayer.NameToGuid(context.Background(), name)
return g.ToWindowsArray(), err return g.ToWindowsArray(), err
} }
@ -94,13 +95,13 @@ func (g *GUID) ToString() string {
type LayerReader = wclayer.LayerReader type LayerReader = wclayer.LayerReader
func NewLayerReader(info DriverInfo, layerID string, parentLayerPaths []string) (LayerReader, error) { func NewLayerReader(info DriverInfo, layerID string, parentLayerPaths []string) (LayerReader, error) {
return wclayer.NewLayerReader(layerPath(&info, layerID), parentLayerPaths) return wclayer.NewLayerReader(context.Background(), layerPath(&info, layerID), parentLayerPaths)
} }
type LayerWriter = wclayer.LayerWriter type LayerWriter = wclayer.LayerWriter
func NewLayerWriter(info DriverInfo, layerID string, parentLayerPaths []string) (LayerWriter, error) { func NewLayerWriter(info DriverInfo, layerID string, parentLayerPaths []string) (LayerWriter, error) {
return wclayer.NewLayerWriter(layerPath(&info, layerID), parentLayerPaths) return wclayer.NewLayerWriter(context.Background(), layerPath(&info, layerID), parentLayerPaths)
} }
type WC_LAYER_DESCRIPTOR = wclayer.WC_LAYER_DESCRIPTOR type WC_LAYER_DESCRIPTOR = wclayer.WC_LAYER_DESCRIPTOR

17
vendor/github.com/cilium/ebpf/abi.go generated vendored
View File

@ -9,6 +9,7 @@ import (
"syscall" "syscall"
"github.com/cilium/ebpf/internal" "github.com/cilium/ebpf/internal"
"github.com/pkg/errors" "github.com/pkg/errors"
) )
@ -31,7 +32,7 @@ func newMapABIFromSpec(spec *MapSpec) *MapABI {
} }
} }
func newMapABIFromFd(fd *bpfFD) (string, *MapABI, error) { func newMapABIFromFd(fd *internal.FD) (string, *MapABI, error) {
info, err := bpfGetMapInfoByFD(fd) info, err := bpfGetMapInfoByFD(fd)
if err != nil { if err != nil {
if errors.Cause(err) == syscall.EINVAL { if errors.Cause(err) == syscall.EINVAL {
@ -50,7 +51,7 @@ func newMapABIFromFd(fd *bpfFD) (string, *MapABI, error) {
}, nil }, nil
} }
func newMapABIFromProc(fd *bpfFD) (*MapABI, error) { func newMapABIFromProc(fd *internal.FD) (*MapABI, error) {
var abi MapABI var abi MapABI
err := scanFdInfo(fd, map[string]interface{}{ err := scanFdInfo(fd, map[string]interface{}{
"map_type": &abi.Type, "map_type": &abi.Type,
@ -94,7 +95,7 @@ func newProgramABIFromSpec(spec *ProgramSpec) *ProgramABI {
} }
} }
func newProgramABIFromFd(fd *bpfFD) (string, *ProgramABI, error) { func newProgramABIFromFd(fd *internal.FD) (string, *ProgramABI, error) {
info, err := bpfGetProgInfoByFD(fd) info, err := bpfGetProgInfoByFD(fd)
if err != nil { if err != nil {
if errors.Cause(err) == syscall.EINVAL { if errors.Cause(err) == syscall.EINVAL {
@ -105,10 +106,10 @@ func newProgramABIFromFd(fd *bpfFD) (string, *ProgramABI, error) {
} }
var name string var name string
if bpfName := convertCString(info.name[:]); bpfName != "" { if bpfName := internal.CString(info.name[:]); bpfName != "" {
name = bpfName name = bpfName
} else { } else {
name = convertCString(info.tag[:]) name = internal.CString(info.tag[:])
} }
return name, &ProgramABI{ return name, &ProgramABI{
@ -116,7 +117,7 @@ func newProgramABIFromFd(fd *bpfFD) (string, *ProgramABI, error) {
}, nil }, nil
} }
func newProgramABIFromProc(fd *bpfFD) (string, *ProgramABI, error) { func newProgramABIFromProc(fd *internal.FD) (string, *ProgramABI, error) {
var ( var (
abi ProgramABI abi ProgramABI
name string name string
@ -139,8 +140,8 @@ func newProgramABIFromProc(fd *bpfFD) (string, *ProgramABI, error) {
return name, &abi, nil return name, &abi, nil
} }
func scanFdInfo(fd *bpfFD, fields map[string]interface{}) error { func scanFdInfo(fd *internal.FD, fields map[string]interface{}) error {
raw, err := fd.value() raw, err := fd.Value()
if err != nil { if err != nil {
return err return err
} }

View File

@ -2,6 +2,7 @@ package ebpf
import ( import (
"github.com/cilium/ebpf/asm" "github.com/cilium/ebpf/asm"
"github.com/cilium/ebpf/internal/btf"
"github.com/pkg/errors" "github.com/pkg/errors"
) )
@ -55,17 +56,61 @@ func NewCollection(spec *CollectionSpec) (*Collection, error) {
// NewCollectionWithOptions creates a Collection from a specification. // NewCollectionWithOptions creates a Collection from a specification.
// //
// Only maps referenced by at least one of the programs are initialized. // Only maps referenced by at least one of the programs are initialized.
func NewCollectionWithOptions(spec *CollectionSpec, opts CollectionOptions) (*Collection, error) { func NewCollectionWithOptions(spec *CollectionSpec, opts CollectionOptions) (coll *Collection, err error) {
maps := make(map[string]*Map) var (
maps = make(map[string]*Map)
progs = make(map[string]*Program)
btfs = make(map[*btf.Spec]*btf.Handle)
)
defer func() {
for _, btf := range btfs {
btf.Close()
}
if err == nil {
return
}
for _, m := range maps {
m.Close()
}
for _, p := range progs {
p.Close()
}
}()
loadBTF := func(spec *btf.Spec) (*btf.Handle, error) {
if btfs[spec] != nil {
return btfs[spec], nil
}
handle, err := btf.NewHandle(spec)
if err != nil {
return nil, err
}
btfs[spec] = handle
return handle, nil
}
for mapName, mapSpec := range spec.Maps { for mapName, mapSpec := range spec.Maps {
m, err := NewMap(mapSpec) var handle *btf.Handle
if mapSpec.BTF != nil {
handle, err = loadBTF(btf.MapSpec(mapSpec.BTF))
if err != nil && !btf.IsNotSupported(err) {
return nil, err
}
}
m, err := newMapWithBTF(mapSpec, handle)
if err != nil { if err != nil {
return nil, errors.Wrapf(err, "map %s", mapName) return nil, errors.Wrapf(err, "map %s", mapName)
} }
maps[mapName] = m maps[mapName] = m
} }
progs := make(map[string]*Program)
for progName, origProgSpec := range spec.Programs { for progName, origProgSpec := range spec.Programs {
progSpec := origProgSpec.Copy() progSpec := origProgSpec.Copy()
@ -91,7 +136,15 @@ func NewCollectionWithOptions(spec *CollectionSpec, opts CollectionOptions) (*Co
} }
} }
prog, err := NewProgramWithOptions(progSpec, opts.Programs) var handle *btf.Handle
if progSpec.BTF != nil {
handle, err = loadBTF(btf.ProgramSpec(progSpec.BTF))
if err != nil && !btf.IsNotSupported(err) {
return nil, err
}
}
prog, err := newProgramWithBTF(progSpec, handle, opts.Programs)
if err != nil { if err != nil {
return nil, errors.Wrapf(err, "program %s", progName) return nil, errors.Wrapf(err, "program %s", progName)
} }

View File

@ -4,12 +4,13 @@ import (
"bytes" "bytes"
"debug/elf" "debug/elf"
"encoding/binary" "encoding/binary"
"fmt"
"io" "io"
"os" "os"
"strings" "strings"
"github.com/cilium/ebpf/asm" "github.com/cilium/ebpf/asm"
"github.com/cilium/ebpf/internal"
"github.com/cilium/ebpf/internal/btf"
"github.com/pkg/errors" "github.com/pkg/errors"
) )
@ -18,6 +19,8 @@ type elfCode struct {
*elf.File *elf.File
symbols []elf.Symbol symbols []elf.Symbol
symbolsPerSection map[elf.SectionIndex]map[uint64]string symbolsPerSection map[elf.SectionIndex]map[uint64]string
license string
version uint32
} }
// LoadCollectionSpec parses an ELF file into a CollectionSpec. // LoadCollectionSpec parses an ELF file into a CollectionSpec.
@ -33,8 +36,8 @@ func LoadCollectionSpec(file string) (*CollectionSpec, error) {
} }
// LoadCollectionSpecFromReader parses an ELF file into a CollectionSpec. // LoadCollectionSpecFromReader parses an ELF file into a CollectionSpec.
func LoadCollectionSpecFromReader(code io.ReaderAt) (*CollectionSpec, error) { func LoadCollectionSpecFromReader(rd io.ReaderAt) (*CollectionSpec, error) {
f, err := elf.NewFile(code) f, err := elf.NewFile(rd)
if err != nil { if err != nil {
return nil, err return nil, err
} }
@ -45,12 +48,17 @@ func LoadCollectionSpecFromReader(code io.ReaderAt) (*CollectionSpec, error) {
return nil, errors.Wrap(err, "load symbols") return nil, errors.Wrap(err, "load symbols")
} }
ec := &elfCode{f, symbols, symbolsPerSection(symbols)} ec := &elfCode{f, symbols, symbolsPerSection(symbols), "", 0}
var (
licenseSection *elf.Section
versionSection *elf.Section
btfMaps = make(map[elf.SectionIndex]*elf.Section)
progSections = make(map[elf.SectionIndex]*elf.Section)
relSections = make(map[elf.SectionIndex]*elf.Section)
mapSections = make(map[elf.SectionIndex]*elf.Section)
)
var licenseSection, versionSection *elf.Section
progSections := make(map[elf.SectionIndex]*elf.Section)
relSections := make(map[elf.SectionIndex]*elf.Section)
mapSections := make(map[elf.SectionIndex]*elf.Section)
for i, sec := range ec.Sections { for i, sec := range ec.Sections {
switch { switch {
case strings.HasPrefix(sec.Name, "license"): case strings.HasPrefix(sec.Name, "license"):
@ -59,6 +67,8 @@ func LoadCollectionSpecFromReader(code io.ReaderAt) (*CollectionSpec, error) {
versionSection = sec versionSection = sec
case strings.HasPrefix(sec.Name, "maps"): case strings.HasPrefix(sec.Name, "maps"):
mapSections[elf.SectionIndex(i)] = sec mapSections[elf.SectionIndex(i)] = sec
case sec.Name == ".maps":
btfMaps[elf.SectionIndex(i)] = sec
case sec.Type == elf.SHT_REL: case sec.Type == elf.SHT_REL:
if int(sec.Info) >= len(ec.Sections) { if int(sec.Info) >= len(ec.Sections) {
return nil, errors.Errorf("found relocation section %v for missing section %v", i, sec.Info) return nil, errors.Errorf("found relocation section %v for missing section %v", i, sec.Info)
@ -67,7 +77,7 @@ func LoadCollectionSpecFromReader(code io.ReaderAt) (*CollectionSpec, error) {
// Store relocations under the section index of the target // Store relocations under the section index of the target
idx := elf.SectionIndex(sec.Info) idx := elf.SectionIndex(sec.Info)
if relSections[idx] != nil { if relSections[idx] != nil {
return nil, errors.Errorf("section %d has multiple relocation sections", idx) return nil, errors.Errorf("section %d has multiple relocation sections", sec.Info)
} }
relSections[idx] = sec relSections[idx] = sec
case sec.Type == elf.SHT_PROGBITS && (sec.Flags&elf.SHF_EXECINSTR) != 0 && sec.Size > 0: case sec.Type == elf.SHT_PROGBITS && (sec.Flags&elf.SHF_EXECINSTR) != 0 && sec.Size > 0:
@ -75,33 +85,36 @@ func LoadCollectionSpecFromReader(code io.ReaderAt) (*CollectionSpec, error) {
} }
} }
license, err := loadLicense(licenseSection) ec.license, err = loadLicense(licenseSection)
if err != nil { if err != nil {
return nil, errors.Wrap(err, "load license") return nil, errors.Wrap(err, "load license")
} }
version, err := loadVersion(versionSection, ec.ByteOrder) ec.version, err = loadVersion(versionSection, ec.ByteOrder)
if err != nil { if err != nil {
return nil, errors.Wrap(err, "load version") return nil, errors.Wrap(err, "load version")
} }
maps, err := ec.loadMaps(mapSections) btf, err := btf.LoadSpecFromReader(rd)
if err != nil { if err != nil {
return nil, errors.Wrap(err, "load BTF")
}
maps := make(map[string]*MapSpec)
if err := ec.loadMaps(maps, mapSections); err != nil {
return nil, errors.Wrap(err, "load maps") return nil, errors.Wrap(err, "load maps")
} }
progs, libs, err := ec.loadPrograms(progSections, relSections, license, version) if len(btfMaps) > 0 {
if err != nil { if err := ec.loadBTFMaps(maps, btfMaps, btf); err != nil {
return nil, errors.Wrap(err, "load programs") return nil, errors.Wrap(err, "load BTF maps")
}
} }
if len(libs) > 0 { progs, err := ec.loadPrograms(progSections, relSections, btf)
for name, prog := range progs { if err != nil {
prog.Instructions, err = link(prog.Instructions, libs...) return nil, errors.Wrap(err, "load programs")
if err != nil {
return nil, errors.Wrapf(err, "program %s", name)
}
}
} }
return &CollectionSpec{maps, progs}, nil return &CollectionSpec{maps, progs}, nil
@ -128,52 +141,74 @@ func loadVersion(sec *elf.Section, bo binary.ByteOrder) (uint32, error) {
return version, errors.Wrapf(err, "section %s", sec.Name) return version, errors.Wrapf(err, "section %s", sec.Name)
} }
func (ec *elfCode) loadPrograms(progSections, relSections map[elf.SectionIndex]*elf.Section, license string, version uint32) (map[string]*ProgramSpec, []asm.Instructions, error) { func (ec *elfCode) loadPrograms(progSections, relSections map[elf.SectionIndex]*elf.Section, btf *btf.Spec) (map[string]*ProgramSpec, error) {
var ( var (
progs = make(map[string]*ProgramSpec) progs []*ProgramSpec
libs []asm.Instructions libs []*ProgramSpec
) )
for idx, prog := range progSections { for idx, prog := range progSections {
syms := ec.symbolsPerSection[idx] syms := ec.symbolsPerSection[idx]
if len(syms) == 0 { if len(syms) == 0 {
return nil, nil, errors.Errorf("section %v: missing symbols", prog.Name) return nil, errors.Errorf("section %v: missing symbols", prog.Name)
} }
funcSym := syms[0] funcSym := syms[0]
if funcSym == "" { if funcSym == "" {
return nil, nil, errors.Errorf("section %v: no label at start", prog.Name) return nil, errors.Errorf("section %v: no label at start", prog.Name)
} }
rels, err := ec.loadRelocations(relSections[idx]) rels, err := ec.loadRelocations(relSections[idx])
if err != nil { if err != nil {
return nil, nil, errors.Wrapf(err, "program %s: can't load relocations", funcSym) return nil, errors.Wrapf(err, "program %s: can't load relocations", funcSym)
} }
insns, err := ec.loadInstructions(prog, syms, rels) insns, length, err := ec.loadInstructions(prog, syms, rels)
if err != nil { if err != nil {
return nil, nil, errors.Wrapf(err, "program %s: can't unmarshal instructions", funcSym) return nil, errors.Wrapf(err, "program %s: can't unmarshal instructions", funcSym)
} }
if progType, attachType := getProgType(prog.Name); progType == UnspecifiedProgram { progType, attachType := getProgType(prog.Name)
spec := &ProgramSpec{
Name: funcSym,
Type: progType,
AttachType: attachType,
License: ec.license,
KernelVersion: ec.version,
Instructions: insns,
}
if btf != nil {
spec.BTF, err = btf.Program(prog.Name, length)
if err != nil {
return nil, errors.Wrapf(err, "BTF for section %s (program %s)", prog.Name, funcSym)
}
}
if spec.Type == UnspecifiedProgram {
// There is no single name we can use for "library" sections, // There is no single name we can use for "library" sections,
// since they may contain multiple functions. We'll decode the // since they may contain multiple functions. We'll decode the
// labels they contain later on, and then link sections that way. // labels they contain later on, and then link sections that way.
libs = append(libs, insns) libs = append(libs, spec)
} else { } else {
progs[funcSym] = &ProgramSpec{ progs = append(progs, spec)
Name: funcSym,
Type: progType,
AttachType: attachType,
License: license,
KernelVersion: version,
Instructions: insns,
}
} }
} }
return progs, libs, nil
res := make(map[string]*ProgramSpec, len(progs))
for _, prog := range progs {
err := link(prog, libs)
if err != nil {
return nil, errors.Wrapf(err, "program %s", prog.Name)
}
res[prog.Name] = prog
}
return res, nil
} }
func (ec *elfCode) loadInstructions(section *elf.Section, symbols, relocations map[uint64]string) (asm.Instructions, error) { func (ec *elfCode) loadInstructions(section *elf.Section, symbols, relocations map[uint64]string) (asm.Instructions, uint64, error) {
var ( var (
r = section.Open() r = section.Open()
insns asm.Instructions insns asm.Instructions
@ -183,10 +218,10 @@ func (ec *elfCode) loadInstructions(section *elf.Section, symbols, relocations m
for { for {
n, err := ins.Unmarshal(r, ec.ByteOrder) n, err := ins.Unmarshal(r, ec.ByteOrder)
if err == io.EOF { if err == io.EOF {
return insns, nil return insns, offset, nil
} }
if err != nil { if err != nil {
return nil, errors.Wrapf(err, "offset %d", offset) return nil, 0, errors.Wrapf(err, "offset %d", offset)
} }
ins.Symbol = symbols[offset] ins.Symbol = symbols[offset]
@ -197,19 +232,15 @@ func (ec *elfCode) loadInstructions(section *elf.Section, symbols, relocations m
} }
} }
func (ec *elfCode) loadMaps(mapSections map[elf.SectionIndex]*elf.Section) (map[string]*MapSpec, error) { func (ec *elfCode) loadMaps(maps map[string]*MapSpec, mapSections map[elf.SectionIndex]*elf.Section) error {
var (
maps = make(map[string]*MapSpec)
b = make([]byte, 1)
)
for idx, sec := range mapSections { for idx, sec := range mapSections {
syms := ec.symbolsPerSection[idx] syms := ec.symbolsPerSection[idx]
if len(syms) == 0 { if len(syms) == 0 {
return nil, errors.Errorf("section %v: no symbols", sec.Name) return errors.Errorf("section %v: no symbols", sec.Name)
} }
if sec.Size%uint64(len(syms)) != 0 { if sec.Size%uint64(len(syms)) != 0 {
return nil, errors.Errorf("section %v: map descriptors are not of equal size", sec.Name) return errors.Errorf("section %v: map descriptors are not of equal size", sec.Name)
} }
var ( var (
@ -219,12 +250,11 @@ func (ec *elfCode) loadMaps(mapSections map[elf.SectionIndex]*elf.Section) (map[
for i, offset := 0, uint64(0); i < len(syms); i, offset = i+1, offset+size { for i, offset := 0, uint64(0); i < len(syms); i, offset = i+1, offset+size {
mapSym := syms[offset] mapSym := syms[offset]
if mapSym == "" { if mapSym == "" {
fmt.Println(syms) return errors.Errorf("section %s: missing symbol for map at offset %d", sec.Name, offset)
return nil, errors.Errorf("section %s: missing symbol for map at offset %d", sec.Name, offset)
} }
if maps[mapSym] != nil { if maps[mapSym] != nil {
return nil, errors.Errorf("section %v: map %v already exists", sec.Name, mapSym) return errors.Errorf("section %v: map %v already exists", sec.Name, mapSym)
} }
lr := io.LimitReader(r, int64(size)) lr := io.LimitReader(r, int64(size))
@ -232,51 +262,152 @@ func (ec *elfCode) loadMaps(mapSections map[elf.SectionIndex]*elf.Section) (map[
var spec MapSpec var spec MapSpec
switch { switch {
case binary.Read(lr, ec.ByteOrder, &spec.Type) != nil: case binary.Read(lr, ec.ByteOrder, &spec.Type) != nil:
return nil, errors.Errorf("map %v: missing type", mapSym) return errors.Errorf("map %v: missing type", mapSym)
case binary.Read(lr, ec.ByteOrder, &spec.KeySize) != nil: case binary.Read(lr, ec.ByteOrder, &spec.KeySize) != nil:
return nil, errors.Errorf("map %v: missing key size", mapSym) return errors.Errorf("map %v: missing key size", mapSym)
case binary.Read(lr, ec.ByteOrder, &spec.ValueSize) != nil: case binary.Read(lr, ec.ByteOrder, &spec.ValueSize) != nil:
return nil, errors.Errorf("map %v: missing value size", mapSym) return errors.Errorf("map %v: missing value size", mapSym)
case binary.Read(lr, ec.ByteOrder, &spec.MaxEntries) != nil: case binary.Read(lr, ec.ByteOrder, &spec.MaxEntries) != nil:
return nil, errors.Errorf("map %v: missing max entries", mapSym) return errors.Errorf("map %v: missing max entries", mapSym)
case binary.Read(lr, ec.ByteOrder, &spec.Flags) != nil: case binary.Read(lr, ec.ByteOrder, &spec.Flags) != nil:
return nil, errors.Errorf("map %v: missing flags", mapSym) return errors.Errorf("map %v: missing flags", mapSym)
} }
for { if _, err := io.Copy(internal.DiscardZeroes{}, lr); err != nil {
_, err := lr.Read(b) return errors.Errorf("map %v: unknown and non-zero fields in definition", mapSym)
if err == io.EOF {
break
}
if err != nil {
return nil, err
}
if b[0] != 0 {
return nil, errors.Errorf("map %v: unknown and non-zero fields in definition", mapSym)
}
} }
maps[mapSym] = &spec maps[mapSym] = &spec
} }
} }
return maps, nil
return nil
}
func (ec *elfCode) loadBTFMaps(maps map[string]*MapSpec, mapSections map[elf.SectionIndex]*elf.Section, spec *btf.Spec) error {
if spec == nil {
return errors.Errorf("missing BTF")
}
for idx, sec := range mapSections {
syms := ec.symbolsPerSection[idx]
if len(syms) == 0 {
return errors.Errorf("section %v: no symbols", sec.Name)
}
for _, sym := range syms {
if maps[sym] != nil {
return errors.Errorf("section %v: map %v already exists", sec.Name, sym)
}
btfMap, err := spec.Map(sym)
if err != nil {
return errors.Wrapf(err, "map %v: can't get BTF", sym)
}
spec, err := mapSpecFromBTF(btfMap)
if err != nil {
return errors.Wrapf(err, "map %v", sym)
}
maps[sym] = spec
}
}
return nil
}
func mapSpecFromBTF(btfMap *btf.Map) (*MapSpec, error) {
var (
mapType, flags, maxEntries uint32
err error
)
for _, member := range btf.MapType(btfMap).Members {
switch member.Name {
case "type":
mapType, err = uintFromBTF(member.Type)
if err != nil {
return nil, errors.Wrap(err, "can't get type")
}
case "map_flags":
flags, err = uintFromBTF(member.Type)
if err != nil {
return nil, errors.Wrap(err, "can't get BTF map flags")
}
case "max_entries":
maxEntries, err = uintFromBTF(member.Type)
if err != nil {
return nil, errors.Wrap(err, "can't get BTF map max entries")
}
case "key":
case "value":
default:
return nil, errors.Errorf("unrecognized field %s in BTF map definition", member.Name)
}
}
keySize, err := btf.Sizeof(btf.MapKey(btfMap))
if err != nil {
return nil, errors.Wrap(err, "can't get size of BTF key")
}
valueSize, err := btf.Sizeof(btf.MapValue(btfMap))
if err != nil {
return nil, errors.Wrap(err, "can't get size of BTF value")
}
return &MapSpec{
Type: MapType(mapType),
KeySize: uint32(keySize),
ValueSize: uint32(valueSize),
MaxEntries: maxEntries,
Flags: flags,
BTF: btfMap,
}, nil
}
// uintFromBTF resolves the __uint macro, which is a pointer to a sized
// array, e.g. for int (*foo)[10], this function will return 10.
func uintFromBTF(typ btf.Type) (uint32, error) {
ptr, ok := typ.(*btf.Pointer)
if !ok {
return 0, errors.Errorf("not a pointer: %v", typ)
}
arr, ok := ptr.Target.(*btf.Array)
if !ok {
return 0, errors.Errorf("not a pointer to array: %v", typ)
}
return arr.Nelems, nil
} }
func getProgType(v string) (ProgramType, AttachType) { func getProgType(v string) (ProgramType, AttachType) {
types := map[string]ProgramType{ types := map[string]ProgramType{
// From https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/tree/tools/lib/bpf/libbpf.c#n3568 // From https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/tree/tools/lib/bpf/libbpf.c#n3568
"socket": SocketFilter, "socket": SocketFilter,
"seccomp": SocketFilter, "seccomp": SocketFilter,
"kprobe/": Kprobe, "kprobe/": Kprobe,
"kretprobe/": Kprobe, "uprobe/": Kprobe,
"tracepoint/": TracePoint, "kretprobe/": Kprobe,
"xdp": XDP, "uretprobe/": Kprobe,
"perf_event": PerfEvent, "tracepoint/": TracePoint,
"sockops": SockOps, "raw_tracepoint/": RawTracepoint,
"sk_skb": SkSKB, "xdp": XDP,
"sk_msg": SkMsg, "perf_event": PerfEvent,
"lirc_mode2": LircMode2, "lwt_in": LWTIn,
"flow_dissector": FlowDissector, "lwt_out": LWTOut,
"lwt_xmit": LWTXmit,
"lwt_seg6local": LWTSeg6Local,
"sockops": SockOps,
"sk_skb": SkSKB,
"sk_msg": SkMsg,
"lirc_mode2": LircMode2,
"flow_dissector": FlowDissector,
"cgroup_skb/": CGroupSKB, "cgroup_skb/": CGroupSKB,
"cgroup/dev": CGroupDevice, "cgroup/dev": CGroupDevice,

530
vendor/github.com/cilium/ebpf/internal/btf/btf.go generated vendored Normal file
View File

@ -0,0 +1,530 @@
package btf
import (
"bytes"
"debug/elf"
"encoding/binary"
"io"
"io/ioutil"
"math"
"reflect"
"unsafe"
"github.com/cilium/ebpf/internal"
"github.com/cilium/ebpf/internal/unix"
"github.com/pkg/errors"
)
const btfMagic = 0xeB9F
// Spec represents decoded BTF.
type Spec struct {
rawTypes []rawType
strings stringTable
types map[string][]Type
funcInfos map[string]extInfo
lineInfos map[string]extInfo
}
type btfHeader struct {
Magic uint16
Version uint8
Flags uint8
HdrLen uint32
TypeOff uint32
TypeLen uint32
StringOff uint32
StringLen uint32
}
// LoadSpecFromReader reads BTF sections from an ELF.
//
// Returns a nil Spec and no error if no BTF was present.
func LoadSpecFromReader(rd io.ReaderAt) (*Spec, error) {
file, err := elf.NewFile(rd)
if err != nil {
return nil, err
}
defer file.Close()
var (
btfSection *elf.Section
btfExtSection *elf.Section
)
for _, sec := range file.Sections {
switch sec.Name {
case ".BTF":
btfSection = sec
case ".BTF.ext":
btfExtSection = sec
}
}
if btfSection == nil {
return nil, nil
}
spec, err := parseBTF(btfSection.Open(), file.ByteOrder)
if err != nil {
return nil, err
}
if btfExtSection != nil {
spec.funcInfos, spec.lineInfos, err = parseExtInfos(btfExtSection.Open(), file.ByteOrder, spec.strings)
if err != nil {
return nil, errors.Wrap(err, "can't read ext info")
}
}
return spec, nil
}
func parseBTF(btf io.ReadSeeker, bo binary.ByteOrder) (*Spec, error) {
rawBTF, err := ioutil.ReadAll(btf)
if err != nil {
return nil, errors.Wrap(err, "can't read BTF")
}
rd := bytes.NewReader(rawBTF)
var header btfHeader
if err := binary.Read(rd, bo, &header); err != nil {
return nil, errors.Wrap(err, "can't read header")
}
if header.Magic != btfMagic {
return nil, errors.Errorf("incorrect magic value %v", header.Magic)
}
if header.Version != 1 {
return nil, errors.Errorf("unexpected version %v", header.Version)
}
if header.Flags != 0 {
return nil, errors.Errorf("unsupported flags %v", header.Flags)
}
remainder := int64(header.HdrLen) - int64(binary.Size(&header))
if remainder < 0 {
return nil, errors.New("header is too short")
}
if _, err := io.CopyN(internal.DiscardZeroes{}, rd, remainder); err != nil {
return nil, errors.Wrap(err, "header padding")
}
if _, err := rd.Seek(int64(header.HdrLen+header.StringOff), io.SeekStart); err != nil {
return nil, errors.Wrap(err, "can't seek to start of string section")
}
strings, err := readStringTable(io.LimitReader(rd, int64(header.StringLen)))
if err != nil {
return nil, errors.Wrap(err, "can't read type names")
}
if _, err := rd.Seek(int64(header.HdrLen+header.TypeOff), io.SeekStart); err != nil {
return nil, errors.Wrap(err, "can't seek to start of type section")
}
rawTypes, err := readTypes(io.LimitReader(rd, int64(header.TypeLen)), bo)
if err != nil {
return nil, errors.Wrap(err, "can't read types")
}
types, err := inflateRawTypes(rawTypes, strings)
if err != nil {
return nil, err
}
return &Spec{
rawTypes: rawTypes,
types: types,
strings: strings,
funcInfos: make(map[string]extInfo),
lineInfos: make(map[string]extInfo),
}, nil
}
func (s *Spec) marshal(bo binary.ByteOrder) ([]byte, error) {
var (
buf bytes.Buffer
header = new(btfHeader)
headerLen = binary.Size(header)
)
// Reserve space for the header. We have to write it last since
// we don't know the size of the type section yet.
_, _ = buf.Write(make([]byte, headerLen))
// Write type section, just after the header.
for _, typ := range s.rawTypes {
if typ.Kind() == kindDatasec {
// Datasec requires patching with information from the ELF
// file. We don't support this at the moment, so patch
// out any Datasec by turning it into a void*.
typ = rawType{}
typ.SetKind(kindPointer)
}
if err := typ.Marshal(&buf, bo); err != nil {
return nil, errors.Wrap(err, "can't marshal BTF")
}
}
typeLen := uint32(buf.Len() - headerLen)
// Write string section after type section.
_, _ = buf.Write(s.strings)
// Fill out the header, and write it out.
header = &btfHeader{
Magic: btfMagic,
Version: 1,
Flags: 0,
HdrLen: uint32(headerLen),
TypeOff: 0,
TypeLen: typeLen,
StringOff: typeLen,
StringLen: uint32(len(s.strings)),
}
raw := buf.Bytes()
err := binary.Write(sliceWriter(raw[:headerLen]), bo, header)
if err != nil {
return nil, errors.Wrap(err, "can't write header")
}
return raw, nil
}
type sliceWriter []byte
func (sw sliceWriter) Write(p []byte) (int, error) {
if len(p) != len(sw) {
return 0, errors.New("size doesn't match")
}
return copy(sw, p), nil
}
// Program finds the BTF for a specific section.
//
// Length is the number of bytes in the raw BPF instruction stream.
//
// Returns an error if there is no BTF.
func (s *Spec) Program(name string, length uint64) (*Program, error) {
if length == 0 {
return nil, errors.New("length musn't be zero")
}
funcInfos, funcOK := s.funcInfos[name]
lineInfos, lineOK := s.lineInfos[name]
if !funcOK && !lineOK {
return nil, errors.Errorf("no BTF for program %s", name)
}
return &Program{s, length, funcInfos, lineInfos}, nil
}
// Map finds the BTF for a map.
//
// Returns an error if there is no BTF for the given name.
func (s *Spec) Map(name string) (*Map, error) {
var mapVar Var
if err := s.FindType(name, &mapVar); err != nil {
return nil, err
}
mapStruct, ok := mapVar.Type.(*Struct)
if !ok {
return nil, errors.Errorf("expected struct, have %s", mapVar.Type)
}
var key, value Type
for _, member := range mapStruct.Members {
switch member.Name {
case "key":
key = member.Type
case "value":
value = member.Type
}
}
if key == nil {
return nil, errors.Errorf("map %s: missing 'key' in type", name)
}
if value == nil {
return nil, errors.Errorf("map %s: missing 'value' in type", name)
}
return &Map{mapStruct, s, key, value}, nil
}
var errNotFound = errors.New("not found")
// FindType searches for a type with a specific name.
//
// hint determines the type of the returned Type.
//
// Returns an error if there is no or multiple matches.
func (s *Spec) FindType(name string, typ Type) error {
var (
wanted = reflect.TypeOf(typ)
candidate Type
)
for _, typ := range s.types[name] {
if reflect.TypeOf(typ) != wanted {
continue
}
if candidate != nil {
return errors.Errorf("type %s: multiple candidates for %T", name, typ)
}
candidate = typ
}
if candidate == nil {
return errors.WithMessagef(errNotFound, "type %s", name)
}
value := reflect.Indirect(reflect.ValueOf(copyType(candidate)))
reflect.Indirect(reflect.ValueOf(typ)).Set(value)
return nil
}
// Handle is a reference to BTF loaded into the kernel.
type Handle struct {
fd *internal.FD
}
// NewHandle loads BTF into the kernel.
//
// Returns an error if BTF is not supported, which can
// be checked by IsNotSupported.
func NewHandle(spec *Spec) (*Handle, error) {
if err := haveBTF(); err != nil {
return nil, err
}
btf, err := spec.marshal(internal.NativeEndian)
if err != nil {
return nil, errors.Wrap(err, "can't marshal BTF")
}
if uint64(len(btf)) > math.MaxUint32 {
return nil, errors.New("BTF exceeds the maximum size")
}
attr := &bpfLoadBTFAttr{
btf: internal.NewSlicePointer(btf),
btfSize: uint32(len(btf)),
}
fd, err := bpfLoadBTF(attr)
if err != nil {
logBuf := make([]byte, 64*1024)
attr.logBuf = internal.NewSlicePointer(logBuf)
attr.btfLogSize = uint32(len(logBuf))
attr.btfLogLevel = 1
_, logErr := bpfLoadBTF(attr)
return nil, internal.ErrorWithLog(err, logBuf, logErr)
}
return &Handle{fd}, nil
}
// Close destroys the handle.
//
// Subsequent calls to FD will return an invalid value.
func (h *Handle) Close() error {
return h.fd.Close()
}
// FD returns the file descriptor for the handle.
func (h *Handle) FD() int {
value, err := h.fd.Value()
if err != nil {
return -1
}
return int(value)
}
// Map is the BTF for a map.
type Map struct {
definition *Struct
spec *Spec
key, value Type
}
// MapSpec should be a method on Map, but is a free function
// to hide it from users of the ebpf package.
func MapSpec(m *Map) *Spec {
	return m.spec
}
// MapType should be a method on Map, but is a free function
// to hide it from users of the ebpf package.
//
// It returns the struct type describing the map declaration.
func MapType(m *Map) *Struct {
	return m.definition
}
// MapKey should be a method on Map, but is a free function
// to hide it from users of the ebpf package.
func MapKey(m *Map) Type {
	return m.key
}
// MapValue should be a method on Map, but is a free function
// to hide it from users of the ebpf package.
func MapValue(m *Map) Type {
	return m.value
}
// Program is the BTF information for a stream of instructions.
type Program struct {
	// spec is the BTF blob the program's types come from.
	spec *Spec
	// length is the accumulated length of the instruction stream,
	// used as the offset when appending further ext_infos.
	// NOTE(review): appears to be in bytes, matching extInfoRecord.InsnOff.
	length uint64
	// funcInfos and lineInfos hold the .BTF.ext records for the stream.
	funcInfos, lineInfos extInfo
}
// ProgramSpec returns the Spec needed for loading function and line infos into the kernel.
//
// This is a free function instead of a method to hide it from users
// of package ebpf.
func ProgramSpec(s *Program) *Spec {
	return s.spec
}
// ProgramAppend the information from other to the Program.
//
// This is a free function instead of a method to hide it from users
// of package ebpf.
func ProgramAppend(s, other *Program) error {
	// Merge both info tables before mutating s, so a failure leaves
	// the receiver untouched.
	mergedFuncs, err := s.funcInfos.append(other.funcInfos, s.length)
	if err != nil {
		return errors.Wrap(err, "func infos")
	}

	mergedLines, err := s.lineInfos.append(other.lineInfos, s.length)
	if err != nil {
		return errors.Wrap(err, "line infos")
	}

	s.length += other.length
	s.funcInfos = mergedFuncs
	s.lineInfos = mergedLines
	return nil
}
// ProgramFuncInfos returns the binary form of BTF function infos.
//
// This is a free function instead of a method to hide it from users
// of package ebpf.
func ProgramFuncInfos(s *Program) (recordSize uint32, bytes []byte, err error) {
	data, err := s.funcInfos.MarshalBinary()
	if err != nil {
		return 0, nil, err
	}
	return s.funcInfos.recordSize, data, nil
}
// ProgramLineInfos returns the binary form of BTF line infos.
//
// This is a free function instead of a method to hide it from users
// of package ebpf.
func ProgramLineInfos(s *Program) (recordSize uint32, bytes []byte, err error) {
	data, err := s.lineInfos.MarshalBinary()
	if err != nil {
		return 0, nil, err
	}
	return s.lineInfos.recordSize, data, nil
}
// IsNotSupported returns true if the error indicates that the kernel
// doesn't support BTF.
func IsNotSupported(err error) bool {
	// haveBTF reports lack of support via an UnsupportedFeatureError
	// named "BTF".
	ufe, ok := errors.Cause(err).(*internal.UnsupportedFeatureError)
	return ok && ufe.Name == "BTF"
}
// bpfLoadBTFAttr mirrors the BPF_BTF_LOAD members of union bpf_attr.
// Field order and sizes must match the kernel ABI, since the struct is
// passed to the bpf(2) syscall verbatim.
type bpfLoadBTFAttr struct {
	btf internal.Pointer
	logBuf internal.Pointer
	btfSize uint32
	btfLogSize uint32
	btfLogLevel uint32
}
// bpfLoadBTF issues the BPF_BTF_LOAD command and wraps the resulting
// descriptor in an internal.FD.
func bpfLoadBTF(attr *bpfLoadBTFAttr) (*internal.FD, error) {
	// 18 is the value of BPF_BTF_LOAD; kept local to avoid a dependency
	// on generated syscall bindings.
	const _BTFLoad = 18
	fd, err := internal.BPF(_BTFLoad, unsafe.Pointer(attr), unsafe.Sizeof(*attr))
	if err != nil {
		return nil, err
	}
	return internal.NewFD(uint32(fd)), nil
}
// minimalBTF serializes a minimal BTF blob (header, two types, a tiny
// string table) used to probe whether the kernel accepts BTF.
func minimalBTF(bo binary.ByteOrder) []byte {
	const minHeaderLength = 24
	var (
		types struct {
			Integer btfType
			Var btfType
			btfVar struct{ Linkage uint32 }
		}
		typLen = uint32(binary.Size(&types))
		// Offset 0 must be the empty string; "a" lives at offset 1.
		strings = []byte{0, 'a', 0}
		header = btfHeader{
			Magic: btfMagic,
			Version: 1,
			HdrLen: minHeaderLength,
			TypeOff: 0,
			TypeLen: typLen,
			StringOff: typLen,
			StringLen: uint32(len(strings)),
		}
	)
	// We use a BTF_KIND_VAR here, to make sure that
	// the kernel understands BTF at least as well as we
	// do. BTF_KIND_VAR was introduced ~5.1.
	types.Integer.SetKind(kindPointer)
	types.Var.NameOff = 1
	types.Var.SetKind(kindVar)
	types.Var.SizeType = 1
	// Writes to a bytes.Buffer cannot fail, so errors are ignored.
	buf := new(bytes.Buffer)
	_ = binary.Write(buf, bo, &header)
	_ = binary.Write(buf, bo, &types)
	buf.Write(strings)
	return buf.Bytes()
}
// haveBTF probes for kernel BTF support by loading the minimal blob
// from minimalBTF, which uses BTF_KIND_VAR (~5.1).
var haveBTF = internal.FeatureTest("BTF", "5.1", func() bool {
	btf := minimalBTF(internal.NativeEndian)
	fd, err := bpfLoadBTF(&bpfLoadBTFAttr{
		btf: internal.NewSlicePointer(btf),
		btfSize: uint32(len(btf)),
	})
	if err == nil {
		fd.Close()
	}
	// Check for EINVAL specifically, rather than err != nil since we
	// otherwise misdetect due to insufficient permissions.
	return errors.Cause(err) != unix.EINVAL
})

190
vendor/github.com/cilium/ebpf/internal/btf/btf_types.go generated vendored Normal file
View File

@ -0,0 +1,190 @@
package btf
import (
"encoding/binary"
"io"
"github.com/pkg/errors"
)
// btfKind describes a Type.
type btfKind uint8
// Equivalents of the BTF_KIND_* constants.
//
// The iota values must match the kernel's numbering exactly, since the
// kind is encoded into btfType.Info on the wire.
const (
	kindUnknown btfKind = iota
	kindInt
	kindPointer
	kindArray
	kindStruct
	kindUnion
	kindEnum
	kindForward
	kindTypedef
	kindVolatile
	kindConst
	kindRestrict
	// Added ~4.20
	kindFunc
	kindFuncProto
	// Added ~5.1
	kindVar
	kindDatasec
)
// Bit-field layout of btfType.Info (see the comment on that struct).
// NB: despite its name, btfTypeVlenMask is a field *length* in bits
// (16), passed to mask(), not a mask value itself.
const (
	btfTypeKindShift = 24
	btfTypeKindLen = 4
	btfTypeVlenShift = 0
	btfTypeVlenMask = 16
)
// btfType is equivalent to struct btf_type in Documentation/bpf/btf.rst.
//
// The field layout must match the kernel's wire format: values of this
// struct are read and written directly with encoding/binary.
type btfType struct {
	NameOff uint32
	/* "info" bits arrangement
	 * bits 0-15: vlen (e.g. # of struct's members)
	 * bits 16-23: unused
	 * bits 24-27: kind (e.g. int, ptr, array...etc)
	 * bits 28-30: unused
	 * bit 31: kind_flag, currently used by
	 * struct, union and fwd
	 */
	Info uint32
	/* "size" is used by INT, ENUM, STRUCT and UNION.
	 * "size" tells the size of the type it is describing.
	 *
	 * "type" is used by PTR, TYPEDEF, VOLATILE, CONST, RESTRICT,
	 * FUNC and FUNC_PROTO.
	 * "type" is a type_id referring to another type.
	 */
	SizeType uint32
}
// mask returns a value with the len least significant bits set.
func mask(len uint32) uint32 {
	m := uint32(1) << len
	return m - 1
}
// info extracts the bit field of the given length (in bits) at shift
// from Info.
func (bt *btfType) info(len, shift uint32) uint32 {
	return (bt.Info >> shift) & mask(len)
}
// setInfo replaces the bit field of the given length at shift with
// value, leaving the other bits of Info intact.
func (bt *btfType) setInfo(value, len, shift uint32) {
	bt.Info &^= mask(len) << shift
	bt.Info |= (value & mask(len)) << shift
}
// Kind returns the BTF_KIND_* value encoded in Info.
func (bt *btfType) Kind() btfKind {
	return btfKind(bt.info(btfTypeKindLen, btfTypeKindShift))
}
// SetKind encodes kind into Info.
func (bt *btfType) SetKind(kind btfKind) {
	bt.setInfo(uint32(kind), btfTypeKindLen, btfTypeKindShift)
}
// Vlen returns the vlen field, e.g. the number of members of a struct.
func (bt *btfType) Vlen() int {
	return int(bt.info(btfTypeVlenMask, btfTypeVlenShift))
}
// SetVlen encodes vlen into Info.
func (bt *btfType) SetVlen(vlen int) {
	bt.setInfo(uint32(vlen), btfTypeVlenMask, btfTypeVlenShift)
}
// Type returns SizeType interpreted as a reference to another type.
func (bt *btfType) Type() TypeID {
	// TODO: Panic here if wrong kind?
	return TypeID(bt.SizeType)
}
// Size returns SizeType interpreted as a size in bytes.
func (bt *btfType) Size() uint32 {
	// TODO: Panic here if wrong kind?
	return bt.SizeType
}
// rawType pairs the fixed-size btfType header with the kind-specific
// data that trails it in the wire format (nil for kinds without any).
type rawType struct {
	btfType
	data interface{}
}
// Marshal writes the type header followed by its trailing data, if any.
func (rt *rawType) Marshal(w io.Writer, bo binary.ByteOrder) error {
	if err := binary.Write(w, bo, &rt.btfType); err != nil {
		return err
	}
	if rt.data == nil {
		return nil
	}
	return binary.Write(w, bo, rt.data)
}
// btfArray is the trailing data of kindArray.
// NOTE(review): mirrors the kernel's struct btf_array — layout must match.
type btfArray struct {
	Type TypeID
	IndexType TypeID
	Nelems uint32
}
// btfMember is one element of the trailing data of kindStruct and
// kindUnion.
// NOTE(review): mirrors the kernel's struct btf_member — layout must match.
type btfMember struct {
	NameOff uint32
	Type TypeID
	Offset uint32
}
// readTypes parses the type section of a BTF blob, returning each type
// header together with its raw, kind-specific trailing data.
//
// Type IDs start at 1; ID 0 is the implicit Void type and is not
// present in the section. Reading stops cleanly at EOF.
func readTypes(r io.Reader, bo binary.ByteOrder) ([]rawType, error) {
	var (
		header btfType
		types []rawType
	)
	for id := TypeID(1); ; id++ {
		if err := binary.Read(r, bo, &header); err == io.EOF {
			return types, nil
		} else if err != nil {
			return nil, errors.Wrapf(err, "can't read type info for id %v", id)
		}
		// How much data follows the header depends on the kind; kinds
		// with no case body carry no trailing data.
		var data interface{}
		switch header.Kind() {
		case kindInt:
			// sizeof(uint32)
			data = make([]byte, 4)
		case kindPointer:
		case kindArray:
			data = new(btfArray)
		case kindStruct:
			fallthrough
		case kindUnion:
			data = make([]btfMember, header.Vlen())
		case kindEnum:
			// sizeof(struct btf_enum)
			data = make([]byte, header.Vlen()*4*2)
		case kindForward:
		case kindTypedef:
		case kindVolatile:
		case kindConst:
		case kindRestrict:
		case kindFunc:
		case kindFuncProto:
			// sizeof(struct btf_param)
			data = make([]byte, header.Vlen()*4*2)
		case kindVar:
			// sizeof(struct btf_variable)
			data = make([]byte, 4)
		case kindDatasec:
			// sizeof(struct btf_var_secinfo)
			data = make([]byte, header.Vlen()*4*3)
		default:
			return nil, errors.Errorf("type id %v: unknown kind: %v", id, header.Kind())
		}
		if data == nil {
			types = append(types, rawType{header, nil})
			continue
		}
		if err := binary.Read(r, bo, data); err != nil {
			return nil, errors.Wrapf(err, "type id %d: kind %v: can't read %T", id, header.Kind(), data)
		}
		types = append(types, rawType{header, data})
	}
}

8
vendor/github.com/cilium/ebpf/internal/btf/doc.go generated vendored Normal file
View File

@ -0,0 +1,8 @@
// Package btf handles data encoded according to the BPF Type Format.
//
// The canonical documentation lives in the Linux kernel repository and is
// available at https://www.kernel.org/doc/html/latest/bpf/btf.html
//
// The API is very much unstable. You should only use this via the main
// ebpf library.
package btf

184
vendor/github.com/cilium/ebpf/internal/btf/ext_info.go generated vendored Normal file
View File

@ -0,0 +1,184 @@
package btf
import (
"bytes"
"encoding/binary"
"io"
"io/ioutil"
"github.com/cilium/ebpf/asm"
"github.com/cilium/ebpf/internal"
"github.com/pkg/errors"
)
// btfExtHeader is the fixed header of a .BTF.ext section.
//
// FuncInfoOff and LineInfoOff are relative to the end of the header,
// i.e. HdrLen bytes into the section (see parseExtInfos).
type btfExtHeader struct {
	Magic uint16
	Version uint8
	Flags uint8
	HdrLen uint32
	FuncInfoOff uint32
	FuncInfoLen uint32
	LineInfoOff uint32
	LineInfoLen uint32
}
// parseExtInfos parses a .BTF.ext section, returning the decoded
// func_info and line_info tables keyed by ELF section name.
//
// Only version 1 with zero flags is supported.
func parseExtInfos(r io.ReadSeeker, bo binary.ByteOrder, strings stringTable) (funcInfo, lineInfo map[string]extInfo, err error) {
	const expectedMagic = 0xeB9F
	var header btfExtHeader
	if err := binary.Read(r, bo, &header); err != nil {
		return nil, nil, errors.Wrap(err, "can't read header")
	}
	if header.Magic != expectedMagic {
		return nil, nil, errors.Errorf("incorrect magic value %v", header.Magic)
	}
	if header.Version != 1 {
		return nil, nil, errors.Errorf("unexpected version %v", header.Version)
	}
	if header.Flags != 0 {
		return nil, nil, errors.Errorf("unsupported flags %v", header.Flags)
	}
	// The header on disk may be longer than the struct we know about;
	// skip (and ignore) any trailing header bytes.
	remainder := int64(header.HdrLen) - int64(binary.Size(&header))
	if remainder < 0 {
		return nil, nil, errors.New("header is too short")
	}
	// Of course, the .BTF.ext header has different semantics than the
	// .BTF ext header. We need to ignore non-null values.
	_, err = io.CopyN(ioutil.Discard, r, remainder)
	if err != nil {
		return nil, nil, errors.Wrap(err, "header padding")
	}
	// Offsets are relative to the end of the header.
	if _, err := r.Seek(int64(header.HdrLen+header.FuncInfoOff), io.SeekStart); err != nil {
		return nil, nil, errors.Wrap(err, "can't seek to function info section")
	}
	funcInfo, err = parseExtInfo(io.LimitReader(r, int64(header.FuncInfoLen)), bo, strings)
	if err != nil {
		return nil, nil, errors.Wrap(err, "function info")
	}
	if _, err := r.Seek(int64(header.HdrLen+header.LineInfoOff), io.SeekStart); err != nil {
		return nil, nil, errors.Wrap(err, "can't seek to line info section")
	}
	lineInfo, err = parseExtInfo(io.LimitReader(r, int64(header.LineInfoLen)), bo, strings)
	if err != nil {
		return nil, nil, errors.Wrap(err, "line info")
	}
	return funcInfo, lineInfo, nil
}
// btfExtInfoSec prefixes each per-section group of records: the offset
// of the section name in the string table and the number of records.
type btfExtInfoSec struct {
	SecNameOff uint32
	NumInfo uint32
}
// extInfoRecord is a single decoded record: the instruction offset in
// bytes, plus the remainder of the record kept opaque.
type extInfoRecord struct {
	InsnOff uint64
	Opaque []byte
}
// extInfo is a decoded func_info or line_info table for one section.
type extInfo struct {
	// recordSize is the on-disk size of each record in bytes.
	recordSize uint32
	records []extInfoRecord
}
// append merges other's records after ei's, shifting the merged
// instruction offsets by offset. Both tables must share a record size.
func (ei extInfo) append(other extInfo, offset uint64) (extInfo, error) {
	if ei.recordSize != other.recordSize {
		return extInfo{}, errors.Errorf("ext_info record size mismatch, want %d (got %d)", ei.recordSize, other.recordSize)
	}

	merged := make([]extInfoRecord, 0, len(ei.records)+len(other.records))
	merged = append(merged, ei.records...)
	for _, rec := range other.records {
		shifted := extInfoRecord{
			InsnOff: rec.InsnOff + offset,
			Opaque:  rec.Opaque,
		}
		merged = append(merged, shifted)
	}
	return extInfo{ei.recordSize, merged}, nil
}
// MarshalBinary encodes the records in the wire format the kernel
// expects. Returns nil (and no error) for an empty record set.
func (ei extInfo) MarshalBinary() ([]byte, error) {
	if len(ei.records) == 0 {
		return nil, nil
	}
	buf := bytes.NewBuffer(make([]byte, 0, int(ei.recordSize)*len(ei.records)))
	for _, info := range ei.records {
		// The kernel expects offsets in number of raw bpf instructions,
		// while the ELF tracks it in bytes.
		insnOff := uint32(info.InsnOff / asm.InstructionSize)
		if err := binary.Write(buf, internal.NativeEndian, insnOff); err != nil {
			return nil, errors.Wrap(err, "can't write instruction offset")
		}
		buf.Write(info.Opaque)
	}
	return buf.Bytes(), nil
}
// parseExtInfo decodes one func_info/line_info table into per-section
// record lists. r must be limited to the table's length; decoding
// stops cleanly at EOF.
func parseExtInfo(r io.Reader, bo binary.ByteOrder, strings stringTable) (map[string]extInfo, error) {
	var recordSize uint32
	if err := binary.Read(r, bo, &recordSize); err != nil {
		return nil, errors.Wrap(err, "can't read record size")
	}
	if recordSize < 4 {
		// Need at least insnOff
		return nil, errors.New("record size too short")
	}
	result := make(map[string]extInfo)
	for {
		var infoHeader btfExtInfoSec
		if err := binary.Read(r, bo, &infoHeader); err == io.EOF {
			return result, nil
		} else if err != nil {
			return nil, errors.Wrap(err, "can't read ext info header")
		}
		secName, err := strings.Lookup(infoHeader.SecNameOff)
		if err != nil {
			return nil, errors.Wrap(err, "can't get section name")
		}
		if infoHeader.NumInfo == 0 {
			return nil, errors.Errorf("section %s has invalid number of records", secName)
		}
		var records []extInfoRecord
		for i := uint32(0); i < infoHeader.NumInfo; i++ {
			var byteOff uint32
			if err := binary.Read(r, bo, &byteOff); err != nil {
				return nil, errors.Wrapf(err, "section %v: can't read extended info offset", secName)
			}
			// Everything after the 4-byte offset is kept opaque.
			buf := make([]byte, int(recordSize-4))
			if _, err := io.ReadFull(r, buf); err != nil {
				return nil, errors.Wrapf(err, "section %v: can't read record", secName)
			}
			if byteOff%asm.InstructionSize != 0 {
				return nil, errors.Errorf("section %v: offset %v is not aligned with instruction size", secName, byteOff)
			}
			records = append(records, extInfoRecord{uint64(byteOff), buf})
		}
		result[secName] = extInfo{
			recordSize,
			records,
		}
	}
}

60
vendor/github.com/cilium/ebpf/internal/btf/strings.go generated vendored Normal file
View File

@ -0,0 +1,60 @@
package btf
import (
"bytes"
"io"
"io/ioutil"
"github.com/pkg/errors"
)
// stringTable is the raw contents of a BTF string section: a sequence
// of NUL-terminated strings, the first of which is empty (offset 0).
type stringTable []byte
// readStringTable reads and validates a BTF string section: it must be
// non-empty, begin with the empty string and be NUL terminated.
func readStringTable(r io.Reader) (stringTable, error) {
	contents, err := ioutil.ReadAll(r)
	if err != nil {
		return nil, errors.Wrap(err, "can't read string table")
	}

	switch {
	case len(contents) < 1:
		return nil, errors.New("string table is empty")
	case contents[0] != '\x00':
		return nil, errors.New("first item in string table is non-empty")
	case contents[len(contents)-1] != '\x00':
		return nil, errors.New("string table isn't null terminated")
	}

	return stringTable(contents), nil
}
// Lookup returns the NUL-terminated string starting at offset.
//
// offset must be the first byte after a preceding NUL (or zero), and
// the string must be terminated within the table.
func (st stringTable) Lookup(offset uint32) (string, error) {
	if int64(offset) > int64(^uint(0)>>1) {
		return "", errors.Errorf("offset %d overflows int", offset)
	}

	pos := int(offset)
	switch {
	case pos >= len(st):
		return "", errors.Errorf("offset %d is out of bounds", offset)
	case pos > 0 && st[pos-1] != '\x00':
		return "", errors.Errorf("offset %d isn't start of a string", offset)
	}

	tail := st[pos:]
	end := bytes.IndexByte(tail, '\x00')
	if end == -1 {
		return "", errors.Errorf("offset %d isn't null terminated", offset)
	}

	return string(tail[:end]), nil
}
// LookupName is like Lookup, but wraps the result in the Name type.
func (st stringTable) LookupName(offset uint32) (Name, error) {
	str, err := st.Lookup(offset)
	return Name(str), err
}

550
vendor/github.com/cilium/ebpf/internal/btf/types.go generated vendored Normal file
View File

@ -0,0 +1,550 @@
package btf
import (
"math"
"github.com/pkg/errors"
)
// maxTypeDepth bounds iteration when unwrapping nested types, guarding
// against malformed (cyclic) input.
const maxTypeDepth = 32
// TypeID identifies a type in a BTF section.
type TypeID uint32
// ID implements part of the Type interface.
func (tid TypeID) ID() TypeID {
	return tid
}
// Type represents a type described by BTF.
type Type interface {
	// ID returns the type's BTF ID.
	ID() TypeID
	// Make a copy of the type, without copying Type members.
	copy() Type
	// walk pushes pointers to any referenced types onto the stack.
	walk(*copyStack)
}
// Name identifies a type.
//
// Anonymous types have an empty name.
type Name string
// name returns the plain string form; implements the namer interface.
func (n Name) name() string {
	return string(n)
}
// Void is the unit type of BTF.
//
// It is always type ID 0 and is omitted from the binary encoding.
type Void struct{}
func (v Void) ID() TypeID { return 0 }
func (v Void) copy() Type { return Void{} }
func (v Void) walk(*copyStack) {}
// Int is an integer of a given length.
type Int struct {
	TypeID
	Name
	// The size of the integer in bytes.
	Size uint32
}
// size implements the sizer interface.
func (i *Int) size() uint32 { return i.Size }
// walk is a no-op: Int references no other types.
func (i *Int) walk(*copyStack) {}
func (i *Int) copy() Type {
	cpy := *i
	return &cpy
}
// Pointer is a pointer to another type.
type Pointer struct {
	TypeID
	Target Type
}
// size implements the sizer interface.
// NOTE(review): hard-codes 8-byte pointers — confirm for 32-bit targets.
func (p *Pointer) size() uint32 { return 8 }
func (p *Pointer) walk(cs *copyStack) { cs.push(&p.Target) }
func (p *Pointer) copy() Type {
	cpy := *p
	return &cpy
}
// Array is an array with a fixed number of elements.
type Array struct {
	TypeID
	Type Type
	Nelems uint32
}
func (arr *Array) walk(cs *copyStack) { cs.push(&arr.Type) }
func (arr *Array) copy() Type {
	cpy := *arr
	return &cpy
}
// Struct is a compound type of consecutive members.
type Struct struct {
	TypeID
	Name
	// The size of the struct including padding, in bytes
	Size uint32
	Members []Member
}
// size implements the sizer interface.
func (s *Struct) size() uint32 { return s.Size }
// walk pushes every member's type onto the copy stack.
func (s *Struct) walk(cs *copyStack) {
	for i := range s.Members {
		cs.push(&s.Members[i].Type)
	}
}
// copy returns a shallow copy with its own Members slice.
func (s *Struct) copy() Type {
	cpy := *s
	cpy.Members = copyMembers(cpy.Members)
	return &cpy
}
// Union is a compound type where members occupy the same memory.
type Union struct {
	TypeID
	Name
	// The size of the union including padding, in bytes.
	Size uint32
	Members []Member
}
// size implements the sizer interface.
func (u *Union) size() uint32 { return u.Size }
// walk pushes every member's type onto the copy stack.
func (u *Union) walk(cs *copyStack) {
	for i := range u.Members {
		cs.push(&u.Members[i].Type)
	}
}
// copy returns a shallow copy with its own Members slice.
func (u *Union) copy() Type {
	cpy := *u
	cpy.Members = copyMembers(cpy.Members)
	return &cpy
}
// Member is part of a Struct or Union.
//
// It is not a valid Type.
type Member struct {
	Name
	Type   Type
	Offset uint32
}

// copyMembers returns a shallow copy of in.
//
// The Type fields of the copy still alias the originals; callers
// (Struct.copy, Union.copy) rely on copyType to fix those up.
func copyMembers(in []Member) []Member {
	// Use the builtin copy instead of an element-by-element append
	// loop; the result has identical length and capacity.
	cpy := make([]Member, len(in))
	copy(cpy, in)
	return cpy
}
// Enum lists possible values.
type Enum struct {
	TypeID
	Name
}
// size implements the sizer interface.
// NOTE(review): BTF enums are encoded as 4-byte values here.
func (e *Enum) size() uint32 { return 4 }
func (e *Enum) walk(*copyStack) {}
func (e *Enum) copy() Type {
	cpy := *e
	return &cpy
}
// Fwd is a forward declaration of a Type.
type Fwd struct {
	TypeID
	Name
}
func (f *Fwd) walk(*copyStack) {}
func (f *Fwd) copy() Type {
	cpy := *f
	return &cpy
}
// Typedef is an alias of a Type.
type Typedef struct {
	TypeID
	Name
	Type Type
}
func (td *Typedef) walk(cs *copyStack) { cs.push(&td.Type) }
func (td *Typedef) copy() Type {
	cpy := *td
	return &cpy
}
// Volatile is a modifier wrapping another Type.
type Volatile struct {
	TypeID
	Type Type
}
func (v *Volatile) walk(cs *copyStack) { cs.push(&v.Type) }
func (v *Volatile) copy() Type {
	cpy := *v
	return &cpy
}
// Const is a modifier wrapping another Type.
type Const struct {
	TypeID
	Type Type
}
func (c *Const) walk(cs *copyStack) { cs.push(&c.Type) }
func (c *Const) copy() Type {
	cpy := *c
	return &cpy
}
// Restrict is a modifier wrapping another Type.
type Restrict struct {
	TypeID
	Type Type
}
func (r *Restrict) walk(cs *copyStack) { cs.push(&r.Type) }
func (r *Restrict) copy() Type {
	cpy := *r
	return &cpy
}
// Func is a function definition.
//
// Type refers to the function's FuncProto.
type Func struct {
	TypeID
	Name
	Type Type
}
func (f *Func) walk(cs *copyStack) { cs.push(&f.Type) }
func (f *Func) copy() Type {
	cpy := *f
	return &cpy
}
// FuncProto is a function declaration.
type FuncProto struct {
	TypeID
	Return Type
	// Parameters not supported yet
}
func (fp *FuncProto) walk(cs *copyStack) { cs.push(&fp.Return) }
func (fp *FuncProto) copy() Type {
	cpy := *fp
	return &cpy
}
// Var is a global variable.
type Var struct {
	TypeID
	Name
	Type Type
}
func (v *Var) walk(cs *copyStack) { cs.push(&v.Type) }
func (v *Var) copy() Type {
	cpy := *v
	return &cpy
}
// Datasec is a global program section containing data.
type Datasec struct {
	TypeID
	Name
	Size uint32
}
// size implements the sizer interface.
func (ds *Datasec) size() uint32 { return ds.Size }
func (ds *Datasec) walk(*copyStack) {}
func (ds *Datasec) copy() Type {
	cpy := *ds
	return &cpy
}
// sizer is implemented by types with an intrinsic size in bytes.
type sizer interface {
	size() uint32
}
// Compile-time assertions that every sized type implements sizer.
var (
	_ sizer = (*Int)(nil)
	_ sizer = (*Pointer)(nil)
	_ sizer = (*Struct)(nil)
	_ sizer = (*Union)(nil)
	_ sizer = (*Enum)(nil)
	_ sizer = (*Datasec)(nil)
)
// Sizeof returns the size of a type in bytes.
//
// Returns an error if the size can't be computed.
func Sizeof(typ Type) (int, error) {
	var (
		// n accumulates array element counts while typ is unwrapped
		// down to a type with an intrinsic size (elem).
		n = int64(1)
		elem int64
	)
	for i := 0; i < maxTypeDepth; i++ {
		switch v := typ.(type) {
		case *Array:
			if n > 0 && int64(v.Nelems) > math.MaxInt64/n {
				return 0, errors.New("overflow")
			}
			// Arrays may be of zero length, which allows
			// n to be zero as well.
			n *= int64(v.Nelems)
			typ = v.Type
			continue
		case sizer:
			elem = int64(v.size())
		case *Typedef:
			typ = v.Type
			continue
		case *Volatile:
			typ = v.Type
			continue
		case *Const:
			typ = v.Type
			continue
		case *Restrict:
			typ = v.Type
			continue
		default:
			return 0, errors.Errorf("unrecognized type %T", typ)
		}
		if n > 0 && elem > math.MaxInt64/n {
			return 0, errors.New("overflow")
		}
		// Reject results that don't fit in the platform's int.
		size := n * elem
		if int64(int(size)) != size {
			return 0, errors.New("overflow")
		}
		return int(size), nil
	}
	return 0, errors.New("exceeded type depth")
}
// copy a Type recursively.
//
// typ may form a cycle.
func copyType(typ Type) Type {
	var (
		// copies maps an original type to its copy, which both
		// deduplicates shared types and terminates cycles.
		copies = make(map[Type]Type)
		work copyStack
	)
	for t := &typ; t != nil; t = work.pop() {
		// *t is the identity of the type.
		if cpy := copies[*t]; cpy != nil {
			*t = cpy
			continue
		}
		cpy := (*t).copy()
		copies[*t] = cpy
		*t = cpy
		// Mark any nested types for copying.
		cpy.walk(&work)
	}
	return typ
}
// copyStack keeps track of pointers to types which still
// need to be copied.
type copyStack []*Type

// push adds a type to the stack.
func (cs *copyStack) push(t *Type) {
	*cs = append(*cs, t)
}

// pop returns the topmost Type, or nil when the stack is empty.
func (cs *copyStack) pop() *Type {
	if len(*cs) == 0 {
		return nil
	}
	last := len(*cs) - 1
	top := (*cs)[last]
	*cs = (*cs)[:last]
	return top
}
// namer is implemented by types that carry a Name.
type namer interface {
	name() string
}
// Compile-time assertion that Name implements namer.
var _ namer = Name("")
// inflateRawTypes takes a list of raw btf types linked via type IDs, and turns
// it into a graph of Types connected via pointers.
//
// Returns a map of named types (so, where NameOff is non-zero). Since BTF ignores
// compilation units, multiple types may share the same name. A Type may form a
// cyclic graph by pointing at itself.
func inflateRawTypes(rawTypes []rawType, rawStrings stringTable) (namedTypes map[string][]Type, err error) {
	// fixup defers the resolution of a type reference until all types
	// exist, which is what makes cyclic graphs possible.
	type fixup struct {
		id TypeID
		typ *Type
	}
	var fixups []fixup
	convertMembers := func(raw []btfMember) ([]Member, error) {
		// NB: The fixup below relies on pre-allocating this array to
		// work, since otherwise append might re-allocate members.
		members := make([]Member, 0, len(raw))
		for i, btfMember := range raw {
			name, err := rawStrings.LookupName(btfMember.NameOff)
			if err != nil {
				return nil, errors.Wrapf(err, "can't get name for member %d", i)
			}
			members = append(members, Member{
				Name: name,
				Offset: btfMember.Offset,
			})
		}
		for i := range members {
			fixups = append(fixups, fixup{raw[i].Type, &members[i].Type})
		}
		return members, nil
	}
	types := make([]Type, 0, len(rawTypes))
	types = append(types, Void{})
	namedTypes = make(map[string][]Type)
	for i, raw := range rawTypes {
		var (
			// Void is defined to always be type ID 0, and is thus
			// omitted from BTF.
			id = TypeID(i + 1)
			typ Type
		)
		name, err := rawStrings.LookupName(raw.NameOff)
		if err != nil {
			return nil, errors.Wrapf(err, "can't get name for type id %d", id)
		}
		switch raw.Kind() {
		case kindInt:
			typ = &Int{id, name, raw.Size()}
		case kindPointer:
			ptr := &Pointer{id, nil}
			fixups = append(fixups, fixup{raw.Type(), &ptr.Target})
			typ = ptr
		case kindArray:
			btfArr := raw.data.(*btfArray)
			// IndexType is unused according to btf.rst.
			// Don't make it available right now.
			arr := &Array{id, nil, btfArr.Nelems}
			fixups = append(fixups, fixup{btfArr.Type, &arr.Type})
			typ = arr
		case kindStruct:
			members, err := convertMembers(raw.data.([]btfMember))
			if err != nil {
				return nil, errors.Wrapf(err, "struct %s (id %d)", name, id)
			}
			typ = &Struct{id, name, raw.Size(), members}
		case kindUnion:
			members, err := convertMembers(raw.data.([]btfMember))
			if err != nil {
				return nil, errors.Wrapf(err, "union %s (id %d)", name, id)
			}
			typ = &Union{id, name, raw.Size(), members}
		case kindEnum:
			typ = &Enum{id, name}
		case kindForward:
			typ = &Fwd{id, name}
		case kindTypedef:
			typedef := &Typedef{id, name, nil}
			fixups = append(fixups, fixup{raw.Type(), &typedef.Type})
			typ = typedef
		case kindVolatile:
			volatile := &Volatile{id, nil}
			fixups = append(fixups, fixup{raw.Type(), &volatile.Type})
			typ = volatile
		case kindConst:
			cnst := &Const{id, nil}
			fixups = append(fixups, fixup{raw.Type(), &cnst.Type})
			typ = cnst
		case kindRestrict:
			restrict := &Restrict{id, nil}
			fixups = append(fixups, fixup{raw.Type(), &restrict.Type})
			typ = restrict
		case kindFunc:
			fn := &Func{id, name, nil}
			fixups = append(fixups, fixup{raw.Type(), &fn.Type})
			typ = fn
		case kindFuncProto:
			fp := &FuncProto{id, nil}
			fixups = append(fixups, fixup{raw.Type(), &fp.Return})
			typ = fp
		case kindVar:
			v := &Var{id, name, nil}
			fixups = append(fixups, fixup{raw.Type(), &v.Type})
			typ = v
		case kindDatasec:
			typ = &Datasec{id, name, raw.SizeType}
		default:
			return nil, errors.Errorf("type id %d: unknown kind: %v", id, raw.Kind())
		}
		types = append(types, typ)
		if namer, ok := typ.(namer); ok {
			if name := namer.name(); name != "" {
				namedTypes[name] = append(namedTypes[name], typ)
			}
		}
	}
	// Resolve all deferred references now that every type exists.
	for _, fixup := range fixups {
		i := int(fixup.id)
		if i >= len(types) {
			return nil, errors.Errorf("reference to invalid type id: %d", fixup.id)
		}
		*fixup.typ = types[i]
	}
	return namedTypes, nil
}

50
vendor/github.com/cilium/ebpf/internal/errors.go generated vendored Normal file
View File

@ -0,0 +1,50 @@
package internal
import (
"bytes"
"fmt"
"strings"
"github.com/cilium/ebpf/internal/unix"
"github.com/pkg/errors"
)
// ErrorWithLog returns an error that includes logs from the
// kernel verifier.
//
// logErr should be the error returned by the syscall that generated
// the log. It is used to check for truncation of the output.
func ErrorWithLog(err error, log []byte, logErr error) error {
	logStr := strings.Trim(CString(log), "\t\r\n ")
	// ENOSPC from the logging retry means the kernel truncated the log.
	if errors.Cause(logErr) == unix.ENOSPC {
		logStr += " (truncated...)"
	}
	return &loadError{err, logStr}
}
type loadError struct {
cause error
log string
}
func (le *loadError) Error() string {
if le.log == "" {
return le.cause.Error()
}
return fmt.Sprintf("%s: %s", le.cause, le.log)
}
func (le *loadError) Cause() error {
return le.cause
}
// CString turns a NUL / zero terminated byte buffer into a string.
//
// Returns the empty string when no terminator is present.
func CString(in []byte) string {
	for i, b := range in {
		if b == 0 {
			return string(in[:i])
		}
	}
	return ""
}

63
vendor/github.com/cilium/ebpf/internal/fd.go generated vendored Normal file
View File

@ -0,0 +1,63 @@
package internal
import (
"runtime"
"strconv"
"github.com/cilium/ebpf/internal/unix"
"github.com/pkg/errors"
)
// ErrClosedFd is returned when operating on a file descriptor that has
// already been closed or forgotten.
var ErrClosedFd = errors.New("use of closed file descriptor")
// FD wraps a kernel file descriptor.
//
// raw is widened to int64 so -1 can mark a closed descriptor without
// colliding with valid uint32 values.
type FD struct {
	raw int64
}
// NewFD wraps a raw kernel file descriptor.
//
// A finalizer is installed so the descriptor is eventually closed even
// if the caller never does; Close and Forget remove it again.
func NewFD(value uint32) *FD {
	fd := &FD{int64(value)}
	runtime.SetFinalizer(fd, (*FD).Close)
	return fd
}
// String formats the descriptor in base 10, including the -1 sentinel
// used after Close.
func (fd *FD) String() string {
	return strconv.FormatInt(fd.raw, 10)
}
// Value returns the raw descriptor, or ErrClosedFd after Close.
func (fd *FD) Value() (uint32, error) {
	if fd.raw < 0 {
		return 0, ErrClosedFd
	}
	return uint32(fd.raw), nil
}
// Close releases the descriptor and removes the finalizer.
//
// Safe to call multiple times; only the first call closes the
// underlying fd.
func (fd *FD) Close() error {
	if fd.raw < 0 {
		return nil
	}
	value := int(fd.raw)
	// Mark as closed before the syscall so later calls are no-ops.
	fd.raw = -1
	fd.Forget()
	return unix.Close(value)
}
// Forget removes the finalizer installed by NewFD without closing the
// descriptor.
func (fd *FD) Forget() {
	runtime.SetFinalizer(fd, nil)
}
// Dup duplicates the descriptor with the close-on-exec flag set.
//
// Returns ErrClosedFd if the descriptor was already closed.
func (fd *FD) Dup() (*FD, error) {
	if fd.raw < 0 {
		return nil, ErrClosedFd
	}
	dup, err := unix.FcntlInt(uintptr(fd.raw), unix.F_DUPFD_CLOEXEC, 0)
	if err != nil {
		return nil, errors.Wrap(err, "can't dup fd")
	}
	return NewFD(uint32(dup)), nil
}

16
vendor/github.com/cilium/ebpf/internal/io.go generated vendored Normal file
View File

@ -0,0 +1,16 @@
package internal
import "github.com/pkg/errors"
// DiscardZeroes makes sure that all written bytes are zero
// before discarding them.
type DiscardZeroes struct{}

// Write implements io.Writer, rejecting any buffer containing a
// non-zero byte.
func (DiscardZeroes) Write(p []byte) (int, error) {
	for i := range p {
		if p[i] != 0 {
			return 0, errors.New("encountered non-zero byte")
		}
	}
	return len(p), nil
}

26
vendor/github.com/cilium/ebpf/internal/ptr.go generated vendored Normal file
View File

@ -0,0 +1,26 @@
package internal
import "unsafe"
// NewPointer creates a 64-bit pointer from an unsafe Pointer.
func NewPointer(ptr unsafe.Pointer) Pointer {
	return Pointer{ptr: ptr}
}
// NewSlicePointer creates a 64-bit pointer from a byte slice.
//
// An empty slice yields the zero Pointer.
func NewSlicePointer(buf []byte) Pointer {
	var p Pointer
	if len(buf) > 0 {
		p.ptr = unsafe.Pointer(&buf[0])
	}
	return p
}
// NewStringPointer creates a 64-bit pointer from a string.
//
// An empty string yields the zero Pointer.
func NewStringPointer(str string) Pointer {
	if str == "" {
		return Pointer{}
	}
	// []byte(str) allocates a copy; the pointer refers to that copy,
	// not to the string's own storage.
	return Pointer{ptr: unsafe.Pointer(&[]byte(str)[0])}
}

View File

@ -1,14 +1,14 @@
// +build armbe mips mips64p32 // +build armbe mips mips64p32
package ebpf package internal
import ( import (
"unsafe" "unsafe"
) )
// ptr wraps an unsafe.Pointer to be 64bit to // Pointer wraps an unsafe.Pointer to be 64bit to
// conform to the syscall specification. // conform to the syscall specification.
type syscallPtr struct { type Pointer struct {
pad uint32 pad uint32
ptr unsafe.Pointer ptr unsafe.Pointer
} }

View File

@ -1,14 +1,14 @@
// +build 386 amd64p32 arm mipsle mips64p32le // +build 386 amd64p32 arm mipsle mips64p32le
package ebpf package internal
import ( import (
"unsafe" "unsafe"
) )
// ptr wraps an unsafe.Pointer to be 64bit to // Pointer wraps an unsafe.Pointer to be 64bit to
// conform to the syscall specification. // conform to the syscall specification.
type syscallPtr struct { type Pointer struct {
ptr unsafe.Pointer ptr unsafe.Pointer
pad uint32 pad uint32
} }

View File

@ -1,14 +1,14 @@
// +build !386,!amd64p32,!arm,!mipsle,!mips64p32le // +build !386,!amd64p32,!arm,!mipsle,!mips64p32le
// +build !armbe,!mips,!mips64p32 // +build !armbe,!mips,!mips64p32
package ebpf package internal
import ( import (
"unsafe" "unsafe"
) )
// ptr wraps an unsafe.Pointer to be 64bit to // Pointer wraps an unsafe.Pointer to be 64bit to
// conform to the syscall specification. // conform to the syscall specification.
type syscallPtr struct { type Pointer struct {
ptr unsafe.Pointer ptr unsafe.Pointer
} }

23
vendor/github.com/cilium/ebpf/internal/syscall.go generated vendored Normal file
View File

@ -0,0 +1,23 @@
package internal
import (
"runtime"
"unsafe"
"github.com/cilium/ebpf/internal/unix"
)
// BPF wraps SYS_BPF.
//
// Any pointers contained in attr must use the Pointer type from this package.
func BPF(cmd int, attr unsafe.Pointer, size uintptr) (uintptr, error) {
	r1, _, errNo := unix.Syscall(unix.SYS_BPF, uintptr(cmd), uintptr(attr), size)
	// Keep attr (and whatever it references) alive until the syscall
	// has returned.
	runtime.KeepAlive(attr)
	var err error
	if errNo != 0 {
		err = errNo
	}
	return r1, err
}

View File

@ -2,23 +2,35 @@ package ebpf
import ( import (
"github.com/cilium/ebpf/asm" "github.com/cilium/ebpf/asm"
"github.com/cilium/ebpf/internal/btf"
"github.com/pkg/errors"
) )
// link resolves bpf-to-bpf calls. // link resolves bpf-to-bpf calls.
// //
// Each section may contain multiple functions / labels, and is only linked // Each library may contain multiple functions / labels, and is only linked
// if the program being edited references one of these functions. // if the program being edited references one of these functions.
// //
// Sections must not require linking themselves. // Libraries must not require linking themselves.
func link(insns asm.Instructions, sections ...asm.Instructions) (asm.Instructions, error) { func link(prog *ProgramSpec, libs []*ProgramSpec) error {
for _, section := range sections { for _, lib := range libs {
var err error insns, err := linkSection(prog.Instructions, lib.Instructions)
insns, err = linkSection(insns, section)
if err != nil { if err != nil {
return nil, err return errors.Wrapf(err, "linking %s", lib.Name)
}
if len(insns) == len(prog.Instructions) {
continue
}
prog.Instructions = insns
if prog.BTF != nil && lib.BTF != nil {
if err := btf.ProgramAppend(prog.BTF, lib.BTF); err != nil {
return errors.Wrapf(err, "linking BTF of %s", lib.Name)
}
} }
} }
return insns, nil return nil
} }
func linkSection(insns, section asm.Instructions) (asm.Instructions, error) { func linkSection(insns, section asm.Instructions) (asm.Instructions, error) {

83
vendor/github.com/cilium/ebpf/map.go generated vendored
View File

@ -2,9 +2,9 @@ package ebpf
import ( import (
"fmt" "fmt"
"unsafe"
"github.com/cilium/ebpf/internal" "github.com/cilium/ebpf/internal"
"github.com/cilium/ebpf/internal/btf"
"github.com/cilium/ebpf/internal/unix" "github.com/cilium/ebpf/internal/unix"
"github.com/pkg/errors" "github.com/pkg/errors"
@ -20,8 +20,12 @@ type MapSpec struct {
ValueSize uint32 ValueSize uint32
MaxEntries uint32 MaxEntries uint32
Flags uint32 Flags uint32
// InnerMap is used as a template for ArrayOfMaps and HashOfMaps // InnerMap is used as a template for ArrayOfMaps and HashOfMaps
InnerMap *MapSpec InnerMap *MapSpec
// The BTF associated with this map.
BTF *btf.Map
} }
func (ms *MapSpec) String() string { func (ms *MapSpec) String() string {
@ -50,7 +54,7 @@ func (ms *MapSpec) Copy() *MapSpec {
// if you require custom encoding. // if you require custom encoding.
type Map struct { type Map struct {
name string name string
fd *bpfFD fd *internal.FD
abi MapABI abi MapABI
// Per CPU maps return values larger than the size in the spec // Per CPU maps return values larger than the size in the spec
fullValueSize int fullValueSize int
@ -63,11 +67,11 @@ func NewMapFromFD(fd int) (*Map, error) {
if fd < 0 { if fd < 0 {
return nil, errors.New("invalid fd") return nil, errors.New("invalid fd")
} }
bpfFd := newBPFFD(uint32(fd)) bpfFd := internal.NewFD(uint32(fd))
name, abi, err := newMapABIFromFd(bpfFd) name, abi, err := newMapABIFromFd(bpfFd)
if err != nil { if err != nil {
bpfFd.forget() bpfFd.Forget()
return nil, err return nil, err
} }
return newMap(bpfFd, name, abi) return newMap(bpfFd, name, abi)
@ -78,24 +82,37 @@ func NewMapFromFD(fd int) (*Map, error) {
// Creating a map for the first time will perform feature detection // Creating a map for the first time will perform feature detection
// by creating small, temporary maps. // by creating small, temporary maps.
func NewMap(spec *MapSpec) (*Map, error) { func NewMap(spec *MapSpec) (*Map, error) {
if spec.BTF == nil {
return newMapWithBTF(spec, nil)
}
handle, err := btf.NewHandle(btf.MapSpec(spec.BTF))
if err != nil && !btf.IsNotSupported(err) {
return nil, errors.Wrap(err, "can't load BTF")
}
return newMapWithBTF(spec, handle)
}
func newMapWithBTF(spec *MapSpec, handle *btf.Handle) (*Map, error) {
if spec.Type != ArrayOfMaps && spec.Type != HashOfMaps { if spec.Type != ArrayOfMaps && spec.Type != HashOfMaps {
return createMap(spec, nil) return createMap(spec, nil, handle)
} }
if spec.InnerMap == nil { if spec.InnerMap == nil {
return nil, errors.Errorf("%s requires InnerMap", spec.Type) return nil, errors.Errorf("%s requires InnerMap", spec.Type)
} }
template, err := createMap(spec.InnerMap, nil) template, err := createMap(spec.InnerMap, nil, handle)
if err != nil { if err != nil {
return nil, err return nil, err
} }
defer template.Close() defer template.Close()
return createMap(spec, template.fd) return createMap(spec, template.fd, handle)
} }
func createMap(spec *MapSpec, inner *bpfFD) (*Map, error) { func createMap(spec *MapSpec, inner *internal.FD, handle *btf.Handle) (*Map, error) {
spec = spec.Copy() spec = spec.Copy()
switch spec.Type { switch spec.Type {
@ -140,12 +157,18 @@ func createMap(spec *MapSpec, inner *bpfFD) (*Map, error) {
if inner != nil { if inner != nil {
var err error var err error
attr.innerMapFd, err = inner.value() attr.innerMapFd, err = inner.Value()
if err != nil { if err != nil {
return nil, errors.Wrap(err, "map create") return nil, errors.Wrap(err, "map create")
} }
} }
if handle != nil && spec.BTF != nil {
attr.btfFd = uint32(handle.FD())
attr.btfKeyTypeID = btf.MapKey(spec.BTF).ID()
attr.btfValueTypeID = btf.MapValue(spec.BTF).ID()
}
name, err := newBPFObjName(spec.Name) name, err := newBPFObjName(spec.Name)
if err != nil { if err != nil {
return nil, errors.Wrap(err, "map create") return nil, errors.Wrap(err, "map create")
@ -163,7 +186,7 @@ func createMap(spec *MapSpec, inner *bpfFD) (*Map, error) {
return newMap(fd, spec.Name, newMapABIFromSpec(spec)) return newMap(fd, spec.Name, newMapABIFromSpec(spec))
} }
func newMap(fd *bpfFD, name string, abi *MapABI) (*Map, error) { func newMap(fd *internal.FD, name string, abi *MapABI) (*Map, error) {
m := &Map{ m := &Map{
name, name,
fd, fd,
@ -251,12 +274,28 @@ func (m *Map) Lookup(key, valueOut interface{}) error {
} }
} }
// LookupAndDelete retrieves and deletes a value from a Map.
func (m *Map) LookupAndDelete(key, valueOut interface{}) error {
valuePtr, valueBytes := makeBuffer(valueOut, m.fullValueSize)
keyPtr, err := marshalPtr(key, int(m.abi.KeySize))
if err != nil {
return errors.WithMessage(err, "can't marshal key")
}
if err := bpfMapLookupAndDelete(m.fd, keyPtr, valuePtr); err != nil {
return errors.WithMessage(err, "lookup and delete and delete failed")
}
return unmarshalBytes(valueOut, valueBytes)
}
// LookupBytes gets a value from Map. // LookupBytes gets a value from Map.
// //
// Returns a nil value if a key doesn't exist. // Returns a nil value if a key doesn't exist.
func (m *Map) LookupBytes(key interface{}) ([]byte, error) { func (m *Map) LookupBytes(key interface{}) ([]byte, error) {
valueBytes := make([]byte, m.fullValueSize) valueBytes := make([]byte, m.fullValueSize)
valuePtr := newPtr(unsafe.Pointer(&valueBytes[0])) valuePtr := internal.NewSlicePointer(valueBytes)
err := m.lookup(key, valuePtr) err := m.lookup(key, valuePtr)
if IsNotExist(err) { if IsNotExist(err) {
@ -266,7 +305,7 @@ func (m *Map) LookupBytes(key interface{}) ([]byte, error) {
return valueBytes, err return valueBytes, err
} }
func (m *Map) lookup(key interface{}, valueOut syscallPtr) error { func (m *Map) lookup(key interface{}, valueOut internal.Pointer) error {
keyPtr, err := marshalPtr(key, int(m.abi.KeySize)) keyPtr, err := marshalPtr(key, int(m.abi.KeySize))
if err != nil { if err != nil {
return errors.WithMessage(err, "can't marshal key") return errors.WithMessage(err, "can't marshal key")
@ -304,7 +343,7 @@ func (m *Map) Update(key, value interface{}, flags MapUpdateFlags) error {
return errors.WithMessage(err, "can't marshal key") return errors.WithMessage(err, "can't marshal key")
} }
var valuePtr syscallPtr var valuePtr internal.Pointer
if m.abi.Type.hasPerCPUValue() { if m.abi.Type.hasPerCPUValue() {
valuePtr, err = marshalPerCPUValue(value, int(m.abi.ValueSize)) valuePtr, err = marshalPerCPUValue(value, int(m.abi.ValueSize))
} else { } else {
@ -355,7 +394,7 @@ func (m *Map) NextKey(key, nextKeyOut interface{}) error {
// Use Iterate if you want to traverse all entries in the map. // Use Iterate if you want to traverse all entries in the map.
func (m *Map) NextKeyBytes(key interface{}) ([]byte, error) { func (m *Map) NextKeyBytes(key interface{}) ([]byte, error) {
nextKey := make([]byte, m.abi.KeySize) nextKey := make([]byte, m.abi.KeySize)
nextKeyPtr := newPtr(unsafe.Pointer(&nextKey[0])) nextKeyPtr := internal.NewSlicePointer(nextKey)
err := m.nextKey(key, nextKeyPtr) err := m.nextKey(key, nextKeyPtr)
if IsNotExist(err) { if IsNotExist(err) {
@ -365,9 +404,9 @@ func (m *Map) NextKeyBytes(key interface{}) ([]byte, error) {
return nextKey, err return nextKey, err
} }
func (m *Map) nextKey(key interface{}, nextKeyOut syscallPtr) error { func (m *Map) nextKey(key interface{}, nextKeyOut internal.Pointer) error {
var ( var (
keyPtr syscallPtr keyPtr internal.Pointer
err error err error
) )
@ -400,14 +439,14 @@ func (m *Map) Close() error {
return nil return nil
} }
return m.fd.close() return m.fd.Close()
} }
// FD gets the file descriptor of the Map. // FD gets the file descriptor of the Map.
// //
// Calling this function is invalid after Close has been called. // Calling this function is invalid after Close has been called.
func (m *Map) FD() int { func (m *Map) FD() int {
fd, err := m.fd.value() fd, err := m.fd.Value()
if err != nil { if err != nil {
// Best effort: -1 is the number most likely to be an // Best effort: -1 is the number most likely to be an
// invalid file descriptor. // invalid file descriptor.
@ -428,7 +467,7 @@ func (m *Map) Clone() (*Map, error) {
return nil, nil return nil, nil
} }
dup, err := m.fd.dup() dup, err := m.fd.Dup()
if err != nil { if err != nil {
return nil, errors.Wrap(err, "can't clone map") return nil, errors.Wrap(err, "can't clone map")
} }
@ -454,7 +493,7 @@ func LoadPinnedMap(fileName string) (*Map, error) {
} }
name, abi, err := newMapABIFromFd(fd) name, abi, err := newMapABIFromFd(fd)
if err != nil { if err != nil {
_ = fd.close() _ = fd.Close()
return nil, err return nil, err
} }
return newMap(fd, name, abi) return newMap(fd, name, abi)
@ -484,7 +523,7 @@ func unmarshalMap(buf []byte) (*Map, error) {
name, abi, err := newMapABIFromFd(fd) name, abi, err := newMapABIFromFd(fd)
if err != nil { if err != nil {
_ = fd.close() _ = fd.Close()
return nil, err return nil, err
} }
@ -493,7 +532,7 @@ func unmarshalMap(buf []byte) (*Map, error) {
// MarshalBinary implements BinaryMarshaler. // MarshalBinary implements BinaryMarshaler.
func (m *Map) MarshalBinary() ([]byte, error) { func (m *Map) MarshalBinary() ([]byte, error) {
fd, err := m.fd.value() fd, err := m.fd.Value()
if err != nil { if err != nil {
return nil, err return nil, err
} }

View File

@ -13,17 +13,24 @@ import (
"github.com/pkg/errors" "github.com/pkg/errors"
) )
func marshalPtr(data interface{}, length int) (syscallPtr, error) { func marshalPtr(data interface{}, length int) (internal.Pointer, error) {
if data == nil {
if length == 0 {
return internal.NewPointer(nil), nil
}
return internal.Pointer{}, errors.New("can't use nil as key of map")
}
if ptr, ok := data.(unsafe.Pointer); ok { if ptr, ok := data.(unsafe.Pointer); ok {
return newPtr(ptr), nil return internal.NewPointer(ptr), nil
} }
buf, err := marshalBytes(data, length) buf, err := marshalBytes(data, length)
if err != nil { if err != nil {
return syscallPtr{}, err return internal.Pointer{}, err
} }
return newPtr(unsafe.Pointer(&buf[0])), nil return internal.NewSlicePointer(buf), nil
} }
func marshalBytes(data interface{}, length int) (buf []byte, err error) { func marshalBytes(data interface{}, length int) (buf []byte, err error) {
@ -52,13 +59,13 @@ func marshalBytes(data interface{}, length int) (buf []byte, err error) {
return buf, nil return buf, nil
} }
func makeBuffer(dst interface{}, length int) (syscallPtr, []byte) { func makeBuffer(dst interface{}, length int) (internal.Pointer, []byte) {
if ptr, ok := dst.(unsafe.Pointer); ok { if ptr, ok := dst.(unsafe.Pointer); ok {
return newPtr(ptr), nil return internal.NewPointer(ptr), nil
} }
buf := make([]byte, length) buf := make([]byte, length)
return newPtr(unsafe.Pointer(&buf[0])), buf return internal.NewSlicePointer(buf), buf
} }
func unmarshalBytes(data interface{}, buf []byte) error { func unmarshalBytes(data interface{}, buf []byte) error {
@ -99,21 +106,21 @@ func unmarshalBytes(data interface{}, buf []byte) error {
// Values are initialized to zero if the slice has less elements than CPUs. // Values are initialized to zero if the slice has less elements than CPUs.
// //
// slice must have a type like []elementType. // slice must have a type like []elementType.
func marshalPerCPUValue(slice interface{}, elemLength int) (syscallPtr, error) { func marshalPerCPUValue(slice interface{}, elemLength int) (internal.Pointer, error) {
sliceType := reflect.TypeOf(slice) sliceType := reflect.TypeOf(slice)
if sliceType.Kind() != reflect.Slice { if sliceType.Kind() != reflect.Slice {
return syscallPtr{}, errors.New("per-CPU value requires slice") return internal.Pointer{}, errors.New("per-CPU value requires slice")
} }
possibleCPUs, err := internal.PossibleCPUs() possibleCPUs, err := internal.PossibleCPUs()
if err != nil { if err != nil {
return syscallPtr{}, err return internal.Pointer{}, err
} }
sliceValue := reflect.ValueOf(slice) sliceValue := reflect.ValueOf(slice)
sliceLen := sliceValue.Len() sliceLen := sliceValue.Len()
if sliceLen > possibleCPUs { if sliceLen > possibleCPUs {
return syscallPtr{}, errors.Errorf("per-CPU value exceeds number of CPUs") return internal.Pointer{}, errors.Errorf("per-CPU value exceeds number of CPUs")
} }
alignedElemLength := align(elemLength, 8) alignedElemLength := align(elemLength, 8)
@ -123,14 +130,14 @@ func marshalPerCPUValue(slice interface{}, elemLength int) (syscallPtr, error) {
elem := sliceValue.Index(i).Interface() elem := sliceValue.Index(i).Interface()
elemBytes, err := marshalBytes(elem, elemLength) elemBytes, err := marshalBytes(elem, elemLength)
if err != nil { if err != nil {
return syscallPtr{}, err return internal.Pointer{}, err
} }
offset := i * alignedElemLength offset := i * alignedElemLength
copy(buf[offset:offset+elemLength], elemBytes) copy(buf[offset:offset+elemLength], elemBytes)
} }
return newPtr(unsafe.Pointer(&buf[0])), nil return internal.NewSlicePointer(buf), nil
} }
// unmarshalPerCPUValue decodes a buffer into a slice containing one value per // unmarshalPerCPUValue decodes a buffer into a slice containing one value per

120
vendor/github.com/cilium/ebpf/prog.go generated vendored
View File

@ -10,6 +10,7 @@ import (
"github.com/cilium/ebpf/asm" "github.com/cilium/ebpf/asm"
"github.com/cilium/ebpf/internal" "github.com/cilium/ebpf/internal"
"github.com/cilium/ebpf/internal/btf"
"github.com/cilium/ebpf/internal/unix" "github.com/cilium/ebpf/internal/unix"
"github.com/pkg/errors" "github.com/pkg/errors"
@ -46,6 +47,11 @@ type ProgramSpec struct {
Instructions asm.Instructions Instructions asm.Instructions
License string License string
KernelVersion uint32 KernelVersion uint32
// The BTF associated with this program. Changing Instructions
// will most likely invalidate the contained data, and may
// result in errors when attempting to load it into the kernel.
BTF *btf.Program
} }
// Copy returns a copy of the spec. // Copy returns a copy of the spec.
@ -68,7 +74,7 @@ type Program struct {
// otherwise it is empty. // otherwise it is empty.
VerifierLog string VerifierLog string
fd *bpfFD fd *internal.FD
name string name string
abi ProgramABI abi ProgramABI
} }
@ -86,7 +92,20 @@ func NewProgram(spec *ProgramSpec) (*Program, error) {
// Loading a program for the first time will perform // Loading a program for the first time will perform
// feature detection by loading small, temporary programs. // feature detection by loading small, temporary programs.
func NewProgramWithOptions(spec *ProgramSpec, opts ProgramOptions) (*Program, error) { func NewProgramWithOptions(spec *ProgramSpec, opts ProgramOptions) (*Program, error) {
attr, err := convertProgramSpec(spec) if spec.BTF == nil {
return newProgramWithBTF(spec, nil, opts)
}
handle, err := btf.NewHandle(btf.ProgramSpec(spec.BTF))
if err != nil && !btf.IsNotSupported(err) {
return nil, errors.Wrap(err, "can't load BTF")
}
return newProgramWithBTF(spec, handle, opts)
}
func newProgramWithBTF(spec *ProgramSpec, btf *btf.Handle, opts ProgramOptions) (*Program, error) {
attr, err := convertProgramSpec(spec, btf)
if err != nil { if err != nil {
return nil, err return nil, err
} }
@ -101,34 +120,28 @@ func NewProgramWithOptions(spec *ProgramSpec, opts ProgramOptions) (*Program, er
logBuf = make([]byte, logSize) logBuf = make([]byte, logSize)
attr.logLevel = opts.LogLevel attr.logLevel = opts.LogLevel
attr.logSize = uint32(len(logBuf)) attr.logSize = uint32(len(logBuf))
attr.logBuf = newPtr(unsafe.Pointer(&logBuf[0])) attr.logBuf = internal.NewSlicePointer(logBuf)
} }
fd, err := bpfProgLoad(attr) fd, err := bpfProgLoad(attr)
if err == nil { if err == nil {
prog := newProgram(fd, spec.Name, &ProgramABI{spec.Type}) prog := newProgram(fd, spec.Name, &ProgramABI{spec.Type})
prog.VerifierLog = convertCString(logBuf) prog.VerifierLog = internal.CString(logBuf)
return prog, nil return prog, nil
} }
truncated := errors.Cause(err) == unix.ENOSPC
if opts.LogLevel == 0 { if opts.LogLevel == 0 {
// Re-run with the verifier enabled to get better error messages. // Re-run with the verifier enabled to get better error messages.
logBuf = make([]byte, logSize) logBuf = make([]byte, logSize)
attr.logLevel = 1 attr.logLevel = 1
attr.logSize = uint32(len(logBuf)) attr.logSize = uint32(len(logBuf))
attr.logBuf = newPtr(unsafe.Pointer(&logBuf[0])) attr.logBuf = internal.NewSlicePointer(logBuf)
_, nerr := bpfProgLoad(attr) _, logErr := bpfProgLoad(attr)
truncated = errors.Cause(nerr) == unix.ENOSPC err = internal.ErrorWithLog(err, logBuf, logErr)
} }
logs := convertCString(logBuf) return nil, errors.Wrap(err, "can't load program")
if truncated {
logs += "\n(truncated...)"
}
return nil, &loadError{err, logs}
} }
// NewProgramFromFD creates a program from a raw fd. // NewProgramFromFD creates a program from a raw fd.
@ -140,18 +153,18 @@ func NewProgramFromFD(fd int) (*Program, error) {
if fd < 0 { if fd < 0 {
return nil, errors.New("invalid fd") return nil, errors.New("invalid fd")
} }
bpfFd := newBPFFD(uint32(fd)) bpfFd := internal.NewFD(uint32(fd))
name, abi, err := newProgramABIFromFd(bpfFd) name, abi, err := newProgramABIFromFd(bpfFd)
if err != nil { if err != nil {
bpfFd.forget() bpfFd.Forget()
return nil, err return nil, err
} }
return newProgram(bpfFd, name, abi), nil return newProgram(bpfFd, name, abi), nil
} }
func newProgram(fd *bpfFD, name string, abi *ProgramABI) *Program { func newProgram(fd *internal.FD, name string, abi *ProgramABI) *Program {
return &Program{ return &Program{
name: name, name: name,
fd: fd, fd: fd,
@ -159,7 +172,7 @@ func newProgram(fd *bpfFD, name string, abi *ProgramABI) *Program {
} }
} }
func convertProgramSpec(spec *ProgramSpec) (*bpfProgLoadAttr, error) { func convertProgramSpec(spec *ProgramSpec, handle *btf.Handle) (*bpfProgLoadAttr, error) {
if len(spec.Instructions) == 0 { if len(spec.Instructions) == 0 {
return nil, errors.New("Instructions cannot be empty") return nil, errors.New("Instructions cannot be empty")
} }
@ -176,13 +189,12 @@ func convertProgramSpec(spec *ProgramSpec) (*bpfProgLoadAttr, error) {
bytecode := buf.Bytes() bytecode := buf.Bytes()
insCount := uint32(len(bytecode) / asm.InstructionSize) insCount := uint32(len(bytecode) / asm.InstructionSize)
lic := []byte(spec.License)
attr := &bpfProgLoadAttr{ attr := &bpfProgLoadAttr{
progType: spec.Type, progType: spec.Type,
expectedAttachType: spec.AttachType, expectedAttachType: spec.AttachType,
insCount: insCount, insCount: insCount,
instructions: newPtr(unsafe.Pointer(&bytecode[0])), instructions: internal.NewSlicePointer(bytecode),
license: newPtr(unsafe.Pointer(&lic[0])), license: internal.NewStringPointer(spec.License),
} }
name, err := newBPFObjName(spec.Name) name, err := newBPFObjName(spec.Name)
@ -194,6 +206,26 @@ func convertProgramSpec(spec *ProgramSpec) (*bpfProgLoadAttr, error) {
attr.progName = name attr.progName = name
} }
if handle != nil && spec.BTF != nil {
attr.progBTFFd = uint32(handle.FD())
recSize, bytes, err := btf.ProgramLineInfos(spec.BTF)
if err != nil {
return nil, errors.Wrap(err, "can't get BTF line infos")
}
attr.lineInfoRecSize = recSize
attr.lineInfoCnt = uint32(uint64(len(bytes)) / uint64(recSize))
attr.lineInfo = internal.NewSlicePointer(bytes)
recSize, bytes, err = btf.ProgramFuncInfos(spec.BTF)
if err != nil {
return nil, errors.Wrap(err, "can't get BTF function infos")
}
attr.funcInfoRecSize = recSize
attr.funcInfoCnt = uint32(uint64(len(bytes)) / uint64(recSize))
attr.funcInfo = internal.NewSlicePointer(bytes)
}
return attr, nil return attr, nil
} }
@ -213,7 +245,7 @@ func (p *Program) ABI() ProgramABI {
// //
// It is invalid to call this function after Close has been called. // It is invalid to call this function after Close has been called.
func (p *Program) FD() int { func (p *Program) FD() int {
fd, err := p.fd.value() fd, err := p.fd.Value()
if err != nil { if err != nil {
// Best effort: -1 is the number most likely to be an // Best effort: -1 is the number most likely to be an
// invalid file descriptor. // invalid file descriptor.
@ -233,7 +265,7 @@ func (p *Program) Clone() (*Program, error) {
return nil, nil return nil, nil
} }
dup, err := p.fd.dup() dup, err := p.fd.Dup()
if err != nil { if err != nil {
return nil, errors.Wrap(err, "can't clone program") return nil, errors.Wrap(err, "can't clone program")
} }
@ -254,7 +286,7 @@ func (p *Program) Close() error {
return nil return nil
} }
return p.fd.close() return p.fd.Close()
} }
// Test runs the Program in the kernel with the given input and returns the // Test runs the Program in the kernel with the given input and returns the
@ -296,7 +328,7 @@ var haveProgTestRun = internal.FeatureTest("BPF_PROG_TEST_RUN", "4.12", func() b
} }
defer prog.Close() defer prog.Close()
fd, err := prog.fd.value() fd, err := prog.fd.Value()
if err != nil { if err != nil {
return false return false
} }
@ -306,10 +338,10 @@ var haveProgTestRun = internal.FeatureTest("BPF_PROG_TEST_RUN", "4.12", func() b
attr := bpfProgTestRunAttr{ attr := bpfProgTestRunAttr{
fd: fd, fd: fd,
dataSizeIn: uint32(len(in)), dataSizeIn: uint32(len(in)),
dataIn: newPtr(unsafe.Pointer(&in[0])), dataIn: internal.NewSlicePointer(in),
} }
_, err = bpfCall(_ProgTestRun, unsafe.Pointer(&attr), unsafe.Sizeof(attr)) _, err = internal.BPF(_ProgTestRun, unsafe.Pointer(&attr), unsafe.Sizeof(attr))
// Check for EINVAL specifically, rather than err != nil since we // Check for EINVAL specifically, rather than err != nil since we
// otherwise misdetect due to insufficient permissions. // otherwise misdetect due to insufficient permissions.
@ -340,7 +372,7 @@ func (p *Program) testRun(in []byte, repeat int) (uint32, []byte, time.Duration,
// See https://patchwork.ozlabs.org/cover/1006822/ // See https://patchwork.ozlabs.org/cover/1006822/
out := make([]byte, len(in)+outputPad) out := make([]byte, len(in)+outputPad)
fd, err := p.fd.value() fd, err := p.fd.Value()
if err != nil { if err != nil {
return 0, nil, 0, err return 0, nil, 0, err
} }
@ -349,12 +381,12 @@ func (p *Program) testRun(in []byte, repeat int) (uint32, []byte, time.Duration,
fd: fd, fd: fd,
dataSizeIn: uint32(len(in)), dataSizeIn: uint32(len(in)),
dataSizeOut: uint32(len(out)), dataSizeOut: uint32(len(out)),
dataIn: newPtr(unsafe.Pointer(&in[0])), dataIn: internal.NewSlicePointer(in),
dataOut: newPtr(unsafe.Pointer(&out[0])), dataOut: internal.NewSlicePointer(out),
repeat: uint32(repeat), repeat: uint32(repeat),
} }
_, err = bpfCall(_ProgTestRun, unsafe.Pointer(&attr), unsafe.Sizeof(attr)) _, err = internal.BPF(_ProgTestRun, unsafe.Pointer(&attr), unsafe.Sizeof(attr))
if err != nil { if err != nil {
return 0, nil, 0, errors.Wrap(err, "can't run test") return 0, nil, 0, errors.Wrap(err, "can't run test")
} }
@ -385,7 +417,7 @@ func unmarshalProgram(buf []byte) (*Program, error) {
name, abi, err := newProgramABIFromFd(fd) name, abi, err := newProgramABIFromFd(fd)
if err != nil { if err != nil {
_ = fd.close() _ = fd.Close()
return nil, err return nil, err
} }
@ -394,7 +426,7 @@ func unmarshalProgram(buf []byte) (*Program, error) {
// MarshalBinary implements BinaryMarshaler. // MarshalBinary implements BinaryMarshaler.
func (p *Program) MarshalBinary() ([]byte, error) { func (p *Program) MarshalBinary() ([]byte, error) {
value, err := p.fd.value() value, err := p.fd.Value()
if err != nil { if err != nil {
return nil, err return nil, err
} }
@ -410,7 +442,7 @@ func (p *Program) Attach(fd int, typ AttachType, flags AttachFlags) error {
return errors.New("invalid fd") return errors.New("invalid fd")
} }
pfd, err := p.fd.value() pfd, err := p.fd.Value()
if err != nil { if err != nil {
return err return err
} }
@ -431,7 +463,7 @@ func (p *Program) Detach(fd int, typ AttachType, flags AttachFlags) error {
return errors.New("invalid fd") return errors.New("invalid fd")
} }
pfd, err := p.fd.value() pfd, err := p.fd.Value()
if err != nil { if err != nil {
return err return err
} }
@ -457,7 +489,7 @@ func LoadPinnedProgram(fileName string) (*Program, error) {
name, abi, err := newProgramABIFromFd(fd) name, abi, err := newProgramABIFromFd(fd)
if err != nil { if err != nil {
_ = fd.close() _ = fd.Close()
return nil, errors.Wrapf(err, "can't get ABI for %s", fileName) return nil, errors.Wrapf(err, "can't get ABI for %s", fileName)
} }
@ -480,22 +512,6 @@ func SanitizeName(name string, replacement rune) string {
}, name) }, name)
} }
type loadError struct {
cause error
verifierLog string
}
func (le *loadError) Error() string {
if le.verifierLog == "" {
return fmt.Sprintf("failed to load program: %s", le.cause)
}
return fmt.Sprintf("failed to load program: %s: %s", le.cause, le.verifierLog)
}
func (le *loadError) Cause() error {
return le.cause
}
// IsNotSupported returns true if an error occurred because // IsNotSupported returns true if an error occurred because
// the kernel does not have support for a specific feature. // the kernel does not have support for a specific feature.
func IsNotSupported(err error) bool { func IsNotSupported(err error) bool {

View File

@ -1,72 +1,17 @@
package ebpf package ebpf
import ( import (
"bytes"
"path/filepath" "path/filepath"
"runtime"
"strconv"
"strings" "strings"
"unsafe" "unsafe"
"github.com/cilium/ebpf/internal" "github.com/cilium/ebpf/internal"
"github.com/cilium/ebpf/internal/btf"
"github.com/cilium/ebpf/internal/unix" "github.com/cilium/ebpf/internal/unix"
"github.com/pkg/errors" "github.com/pkg/errors"
) )
var errClosedFd = errors.New("use of closed file descriptor")
type bpfFD struct {
raw int64
}
func newBPFFD(value uint32) *bpfFD {
fd := &bpfFD{int64(value)}
runtime.SetFinalizer(fd, (*bpfFD).close)
return fd
}
func (fd *bpfFD) String() string {
return strconv.FormatInt(fd.raw, 10)
}
func (fd *bpfFD) value() (uint32, error) {
if fd.raw < 0 {
return 0, errClosedFd
}
return uint32(fd.raw), nil
}
func (fd *bpfFD) close() error {
if fd.raw < 0 {
return nil
}
value := int(fd.raw)
fd.raw = -1
fd.forget()
return unix.Close(value)
}
func (fd *bpfFD) forget() {
runtime.SetFinalizer(fd, nil)
}
func (fd *bpfFD) dup() (*bpfFD, error) {
if fd.raw < 0 {
return nil, errClosedFd
}
dup, err := unix.FcntlInt(uintptr(fd.raw), unix.F_DUPFD_CLOEXEC, 0)
if err != nil {
return nil, errors.Wrap(err, "can't dup fd")
}
return newBPFFD(uint32(dup)), nil
}
// bpfObjName is a null-terminated string made up of // bpfObjName is a null-terminated string made up of
// 'A-Za-z0-9_' characters. // 'A-Za-z0-9_' characters.
type bpfObjName [unix.BPF_OBJ_NAME_LEN]byte type bpfObjName [unix.BPF_OBJ_NAME_LEN]byte
@ -99,21 +44,25 @@ func invalidBPFObjNameChar(char rune) bool {
} }
type bpfMapCreateAttr struct { type bpfMapCreateAttr struct {
mapType MapType mapType MapType
keySize uint32 keySize uint32
valueSize uint32 valueSize uint32
maxEntries uint32 maxEntries uint32
flags uint32 flags uint32
innerMapFd uint32 // since 4.12 56f668dfe00d innerMapFd uint32 // since 4.12 56f668dfe00d
numaNode uint32 // since 4.14 96eabe7a40aa numaNode uint32 // since 4.14 96eabe7a40aa
mapName bpfObjName // since 4.15 ad5b177bd73f mapName bpfObjName // since 4.15 ad5b177bd73f
mapIfIndex uint32
btfFd uint32
btfKeyTypeID btf.TypeID
btfValueTypeID btf.TypeID
} }
type bpfMapOpAttr struct { type bpfMapOpAttr struct {
mapFd uint32 mapFd uint32
padding uint32 padding uint32
key syscallPtr key internal.Pointer
value syscallPtr value internal.Pointer
flags uint64 flags uint64
} }
@ -128,7 +77,7 @@ type bpfMapInfo struct {
} }
type bpfPinObjAttr struct { type bpfPinObjAttr struct {
fileName syscallPtr fileName internal.Pointer
fd uint32 fd uint32
padding uint32 padding uint32
} }
@ -136,16 +85,23 @@ type bpfPinObjAttr struct {
type bpfProgLoadAttr struct { type bpfProgLoadAttr struct {
progType ProgramType progType ProgramType
insCount uint32 insCount uint32
instructions syscallPtr instructions internal.Pointer
license syscallPtr license internal.Pointer
logLevel uint32 logLevel uint32
logSize uint32 logSize uint32
logBuf syscallPtr logBuf internal.Pointer
kernelVersion uint32 // since 4.1 2541517c32be kernelVersion uint32 // since 4.1 2541517c32be
progFlags uint32 // since 4.11 e07b98d9bffe progFlags uint32 // since 4.11 e07b98d9bffe
progName bpfObjName // since 4.15 067cae47771c progName bpfObjName // since 4.15 067cae47771c
progIfIndex uint32 // since 4.15 1f6f4cb7ba21 progIfIndex uint32 // since 4.15 1f6f4cb7ba21
expectedAttachType AttachType // since 4.17 5e43f899b03a expectedAttachType AttachType // since 4.17 5e43f899b03a
progBTFFd uint32
funcInfoRecSize uint32
funcInfo internal.Pointer
funcInfoCnt uint32
lineInfoRecSize uint32
lineInfo internal.Pointer
lineInfoCnt uint32
} }
type bpfProgInfo struct { type bpfProgInfo struct {
@ -154,12 +110,12 @@ type bpfProgInfo struct {
tag [unix.BPF_TAG_SIZE]byte tag [unix.BPF_TAG_SIZE]byte
jitedLen uint32 jitedLen uint32
xlatedLen uint32 xlatedLen uint32
jited syscallPtr jited internal.Pointer
xlated syscallPtr xlated internal.Pointer
loadTime uint64 // since 4.15 cb4d2b3f03d8 loadTime uint64 // since 4.15 cb4d2b3f03d8
createdByUID uint32 createdByUID uint32
nrMapIDs uint32 nrMapIDs uint32
mapIds syscallPtr mapIds internal.Pointer
name bpfObjName name bpfObjName
} }
@ -168,8 +124,8 @@ type bpfProgTestRunAttr struct {
retval uint32 retval uint32
dataSizeIn uint32 dataSizeIn uint32
dataSizeOut uint32 dataSizeOut uint32
dataIn syscallPtr dataIn internal.Pointer
dataOut syscallPtr dataOut internal.Pointer
repeat uint32 repeat uint32
duration uint32 duration uint32
} }
@ -184,7 +140,7 @@ type bpfProgAlterAttr struct {
type bpfObjGetInfoByFDAttr struct { type bpfObjGetInfoByFDAttr struct {
fd uint32 fd uint32
infoLen uint32 infoLen uint32
info syscallPtr // May be either bpfMapInfo or bpfProgInfo info internal.Pointer // May be either bpfMapInfo or bpfProgInfo
} }
type bpfGetFDByIDAttr struct { type bpfGetFDByIDAttr struct {
@ -192,13 +148,9 @@ type bpfGetFDByIDAttr struct {
next uint32 next uint32
} }
func newPtr(ptr unsafe.Pointer) syscallPtr { func bpfProgLoad(attr *bpfProgLoadAttr) (*internal.FD, error) {
return syscallPtr{ptr: ptr}
}
func bpfProgLoad(attr *bpfProgLoadAttr) (*bpfFD, error) {
for { for {
fd, err := bpfCall(_ProgLoad, unsafe.Pointer(attr), unsafe.Sizeof(*attr)) fd, err := internal.BPF(_ProgLoad, unsafe.Pointer(attr), unsafe.Sizeof(*attr))
// As of ~4.20 the verifier can be interrupted by a signal, // As of ~4.20 the verifier can be interrupted by a signal,
// and returns EAGAIN in that case. // and returns EAGAIN in that case.
if err == unix.EAGAIN { if err == unix.EAGAIN {
@ -209,22 +161,22 @@ func bpfProgLoad(attr *bpfProgLoadAttr) (*bpfFD, error) {
return nil, err return nil, err
} }
return newBPFFD(uint32(fd)), nil return internal.NewFD(uint32(fd)), nil
} }
} }
func bpfProgAlter(cmd int, attr *bpfProgAlterAttr) error { func bpfProgAlter(cmd int, attr *bpfProgAlterAttr) error {
_, err := bpfCall(cmd, unsafe.Pointer(attr), unsafe.Sizeof(*attr)) _, err := internal.BPF(cmd, unsafe.Pointer(attr), unsafe.Sizeof(*attr))
return err return err
} }
func bpfMapCreate(attr *bpfMapCreateAttr) (*bpfFD, error) { func bpfMapCreate(attr *bpfMapCreateAttr) (*internal.FD, error) {
fd, err := bpfCall(_MapCreate, unsafe.Pointer(attr), unsafe.Sizeof(*attr)) fd, err := internal.BPF(_MapCreate, unsafe.Pointer(attr), unsafe.Sizeof(*attr))
if err != nil { if err != nil {
return nil, err return nil, err
} }
return newBPFFD(uint32(fd)), nil return internal.NewFD(uint32(fd)), nil
} }
var haveNestedMaps = internal.FeatureTest("nested maps", "4.12", func() bool { var haveNestedMaps = internal.FeatureTest("nested maps", "4.12", func() bool {
@ -237,9 +189,9 @@ var haveNestedMaps = internal.FeatureTest("nested maps", "4.12", func() bool {
if err != nil { if err != nil {
return false return false
} }
defer inner.close() defer inner.Close()
innerFd, _ := inner.value() innerFd, _ := inner.Value()
nested, err := bpfMapCreate(&bpfMapCreateAttr{ nested, err := bpfMapCreate(&bpfMapCreateAttr{
mapType: ArrayOfMaps, mapType: ArrayOfMaps,
keySize: 4, keySize: 4,
@ -251,12 +203,12 @@ var haveNestedMaps = internal.FeatureTest("nested maps", "4.12", func() bool {
return false return false
} }
_ = nested.close() _ = nested.Close()
return true return true
}) })
func bpfMapLookupElem(m *bpfFD, key, valueOut syscallPtr) error { func bpfMapLookupElem(m *internal.FD, key, valueOut internal.Pointer) error {
fd, err := m.value() fd, err := m.Value()
if err != nil { if err != nil {
return err return err
} }
@ -266,12 +218,27 @@ func bpfMapLookupElem(m *bpfFD, key, valueOut syscallPtr) error {
key: key, key: key,
value: valueOut, value: valueOut,
} }
_, err = bpfCall(_MapLookupElem, unsafe.Pointer(&attr), unsafe.Sizeof(attr)) _, err = internal.BPF(_MapLookupElem, unsafe.Pointer(&attr), unsafe.Sizeof(attr))
return err return err
} }
func bpfMapUpdateElem(m *bpfFD, key, valueOut syscallPtr, flags uint64) error { func bpfMapLookupAndDelete(m *internal.FD, key, valueOut internal.Pointer) error {
fd, err := m.value() fd, err := m.Value()
if err != nil {
return err
}
attr := bpfMapOpAttr{
mapFd: fd,
key: key,
value: valueOut,
}
_, err = internal.BPF(_MapLookupAndDeleteElem, unsafe.Pointer(&attr), unsafe.Sizeof(attr))
return err
}
func bpfMapUpdateElem(m *internal.FD, key, valueOut internal.Pointer, flags uint64) error {
fd, err := m.Value()
if err != nil { if err != nil {
return err return err
} }
@ -282,12 +249,12 @@ func bpfMapUpdateElem(m *bpfFD, key, valueOut syscallPtr, flags uint64) error {
value: valueOut, value: valueOut,
flags: flags, flags: flags,
} }
_, err = bpfCall(_MapUpdateElem, unsafe.Pointer(&attr), unsafe.Sizeof(attr)) _, err = internal.BPF(_MapUpdateElem, unsafe.Pointer(&attr), unsafe.Sizeof(attr))
return err return err
} }
func bpfMapDeleteElem(m *bpfFD, key syscallPtr) error { func bpfMapDeleteElem(m *internal.FD, key internal.Pointer) error {
fd, err := m.value() fd, err := m.Value()
if err != nil { if err != nil {
return err return err
} }
@ -296,12 +263,12 @@ func bpfMapDeleteElem(m *bpfFD, key syscallPtr) error {
mapFd: fd, mapFd: fd,
key: key, key: key,
} }
_, err = bpfCall(_MapDeleteElem, unsafe.Pointer(&attr), unsafe.Sizeof(attr)) _, err = internal.BPF(_MapDeleteElem, unsafe.Pointer(&attr), unsafe.Sizeof(attr))
return err return err
} }
func bpfMapGetNextKey(m *bpfFD, key, nextKeyOut syscallPtr) error { func bpfMapGetNextKey(m *internal.FD, key, nextKeyOut internal.Pointer) error {
fd, err := m.value() fd, err := m.Value()
if err != nil { if err != nil {
return err return err
} }
@ -311,13 +278,13 @@ func bpfMapGetNextKey(m *bpfFD, key, nextKeyOut syscallPtr) error {
key: key, key: key,
value: nextKeyOut, value: nextKeyOut,
} }
_, err = bpfCall(_MapGetNextKey, unsafe.Pointer(&attr), unsafe.Sizeof(attr)) _, err = internal.BPF(_MapGetNextKey, unsafe.Pointer(&attr), unsafe.Sizeof(attr))
return err return err
} }
const bpfFSType = 0xcafe4a11 const bpfFSType = 0xcafe4a11
func bpfPinObject(fileName string, fd *bpfFD) error { func bpfPinObject(fileName string, fd *internal.FD) error {
dirName := filepath.Dir(fileName) dirName := filepath.Dir(fileName)
var statfs unix.Statfs_t var statfs unix.Statfs_t
if err := unix.Statfs(dirName, &statfs); err != nil { if err := unix.Statfs(dirName, &statfs); err != nil {
@ -327,30 +294,30 @@ func bpfPinObject(fileName string, fd *bpfFD) error {
return errors.Errorf("%s is not on a bpf filesystem", fileName) return errors.Errorf("%s is not on a bpf filesystem", fileName)
} }
value, err := fd.value() value, err := fd.Value()
if err != nil { if err != nil {
return err return err
} }
_, err = bpfCall(_ObjPin, unsafe.Pointer(&bpfPinObjAttr{ _, err = internal.BPF(_ObjPin, unsafe.Pointer(&bpfPinObjAttr{
fileName: newPtr(unsafe.Pointer(&[]byte(fileName)[0])), fileName: internal.NewStringPointer(fileName),
fd: value, fd: value,
}), 16) }), 16)
return errors.Wrapf(err, "pin object %s", fileName) return errors.Wrapf(err, "pin object %s", fileName)
} }
func bpfGetObject(fileName string) (*bpfFD, error) { func bpfGetObject(fileName string) (*internal.FD, error) {
ptr, err := bpfCall(_ObjGet, unsafe.Pointer(&bpfPinObjAttr{ ptr, err := internal.BPF(_ObjGet, unsafe.Pointer(&bpfPinObjAttr{
fileName: newPtr(unsafe.Pointer(&[]byte(fileName)[0])), fileName: internal.NewStringPointer(fileName),
}), 16) }), 16)
if err != nil { if err != nil {
return nil, errors.Wrapf(err, "get object %s", fileName) return nil, errors.Wrapf(err, "get object %s", fileName)
} }
return newBPFFD(uint32(ptr)), nil return internal.NewFD(uint32(ptr)), nil
} }
func bpfGetObjectInfoByFD(fd *bpfFD, info unsafe.Pointer, size uintptr) error { func bpfGetObjectInfoByFD(fd *internal.FD, info unsafe.Pointer, size uintptr) error {
value, err := fd.value() value, err := fd.Value()
if err != nil { if err != nil {
return err return err
} }
@ -359,19 +326,19 @@ func bpfGetObjectInfoByFD(fd *bpfFD, info unsafe.Pointer, size uintptr) error {
attr := bpfObjGetInfoByFDAttr{ attr := bpfObjGetInfoByFDAttr{
fd: value, fd: value,
infoLen: uint32(size), infoLen: uint32(size),
info: newPtr(info), info: internal.NewPointer(info),
} }
_, err = bpfCall(_ObjGetInfoByFD, unsafe.Pointer(&attr), unsafe.Sizeof(attr)) _, err = internal.BPF(_ObjGetInfoByFD, unsafe.Pointer(&attr), unsafe.Sizeof(attr))
return errors.Wrapf(err, "fd %d", value) return errors.Wrapf(err, "fd %d", fd)
} }
func bpfGetProgInfoByFD(fd *bpfFD) (*bpfProgInfo, error) { func bpfGetProgInfoByFD(fd *internal.FD) (*bpfProgInfo, error) {
var info bpfProgInfo var info bpfProgInfo
err := bpfGetObjectInfoByFD(fd, unsafe.Pointer(&info), unsafe.Sizeof(info)) err := bpfGetObjectInfoByFD(fd, unsafe.Pointer(&info), unsafe.Sizeof(info))
return &info, errors.Wrap(err, "can't get program info") return &info, errors.Wrap(err, "can't get program info")
} }
func bpfGetMapInfoByFD(fd *bpfFD) (*bpfMapInfo, error) { func bpfGetMapInfoByFD(fd *internal.FD) (*bpfMapInfo, error) {
var info bpfMapInfo var info bpfMapInfo
err := bpfGetObjectInfoByFD(fd, unsafe.Pointer(&info), unsafe.Sizeof(info)) err := bpfGetObjectInfoByFD(fd, unsafe.Pointer(&info), unsafe.Sizeof(info))
return &info, errors.Wrap(err, "can't get map info") return &info, errors.Wrap(err, "can't get map info")
@ -398,50 +365,30 @@ var haveObjName = internal.FeatureTest("object names", "4.15", func() bool {
return false return false
} }
_ = fd.close() _ = fd.Close()
return true return true
}) })
func bpfGetMapFDByID(id uint32) (*bpfFD, error) { func bpfGetMapFDByID(id uint32) (*internal.FD, error) {
// available from 4.13 // available from 4.13
attr := bpfGetFDByIDAttr{ attr := bpfGetFDByIDAttr{
id: id, id: id,
} }
ptr, err := bpfCall(_MapGetFDByID, unsafe.Pointer(&attr), unsafe.Sizeof(attr)) ptr, err := internal.BPF(_MapGetFDByID, unsafe.Pointer(&attr), unsafe.Sizeof(attr))
if err != nil { if err != nil {
return nil, errors.Wrapf(err, "can't get fd for map id %d", id) return nil, errors.Wrapf(err, "can't get fd for map id %d", id)
} }
return newBPFFD(uint32(ptr)), nil return internal.NewFD(uint32(ptr)), nil
} }
func bpfGetProgramFDByID(id uint32) (*bpfFD, error) { func bpfGetProgramFDByID(id uint32) (*internal.FD, error) {
// available from 4.13 // available from 4.13
attr := bpfGetFDByIDAttr{ attr := bpfGetFDByIDAttr{
id: id, id: id,
} }
ptr, err := bpfCall(_ProgGetFDByID, unsafe.Pointer(&attr), unsafe.Sizeof(attr)) ptr, err := internal.BPF(_ProgGetFDByID, unsafe.Pointer(&attr), unsafe.Sizeof(attr))
if err != nil { if err != nil {
return nil, errors.Wrapf(err, "can't get fd for program id %d", id) return nil, errors.Wrapf(err, "can't get fd for program id %d", id)
} }
return newBPFFD(uint32(ptr)), nil return internal.NewFD(uint32(ptr)), nil
}
func bpfCall(cmd int, attr unsafe.Pointer, size uintptr) (uintptr, error) {
r1, _, errNo := unix.Syscall(unix.SYS_BPF, uintptr(cmd), uintptr(attr), size)
runtime.KeepAlive(attr)
var err error
if errNo != 0 {
err = errNo
}
return r1, err
}
func convertCString(in []byte) string {
inLen := bytes.IndexByte(in, 0)
if inLen == -1 {
return ""
}
return string(in[:inLen])
} }

View File

@ -57,6 +57,30 @@ const (
// HashOfMaps - Each item in the hash map is another map. The inner map mustn't be a map of maps // HashOfMaps - Each item in the hash map is another map. The inner map mustn't be a map of maps
// itself. // itself.
HashOfMaps HashOfMaps
// DevMap - Specialized map to store references to network devices.
DevMap
// SockMap - Specialized map to store references to sockets.
SockMap
// CPUMap - Specialized map to store references to CPUs.
CPUMap
// XSKMap - Specialized map for XDP programs to store references to open sockets.
XSKMap
// SockHash - Specialized hash to store references to sockets.
SockHash
// CGroupStorage - Special map for CGroups.
CGroupStorage
// ReusePortSockArray - Specialized map to store references to sockets that can be reused.
ReusePortSockArray
// PerCPUCGroupStorage - Special per CPU map for CGroups.
PerCPUCGroupStorage
// Queue - FIFO storage for BPF programs.
Queue
// Stack - LIFO storage for BPF programs.
Stack
// SkStorage - Specialized map for local storage at SK for BPF programs.
SkStorage
// DevMapHash - Hash-based indexing scheme for references to network devices.
DevMapHash
) )
// hasPerCPUValue returns true if the Map stores a value per CPU. // hasPerCPUValue returns true if the Map stores a value per CPU.
@ -84,6 +108,13 @@ const (
_ProgGetFDByID _ProgGetFDByID
_MapGetFDByID _MapGetFDByID
_ObjGetInfoByFD _ObjGetInfoByFD
_ProgQuery
_RawTracepointOpen
_BTFLoad
_BTFGetFDByID
_TaskFDQuery
_MapLookupAndDeleteElem
_MapFreeze
) )
const ( const (
@ -149,6 +180,8 @@ const (
RawTracepointWritable RawTracepointWritable
// CGroupSockopt program // CGroupSockopt program
CGroupSockopt CGroupSockopt
// Tracing program
Tracing
) )
// AttachType of the eBPF program, needed to differentiate allowed context accesses in // AttachType of the eBPF program, needed to differentiate allowed context accesses in
@ -183,6 +216,9 @@ const (
AttachCGroupUDP6Recvmsg AttachCGroupUDP6Recvmsg
AttachCGroupGetsockopt AttachCGroupGetsockopt
AttachCGroupSetsockopt AttachCGroupSetsockopt
AttachTraceRawTp
AttachTraceFEntry
AttachTraceFExit
) )
// AttachFlags of the eBPF program used in BPF_PROG_ATTACH command // AttachFlags of the eBPF program used in BPF_PROG_ATTACH command

View File

@ -22,11 +22,23 @@ func _() {
_ = x[LPMTrie-11] _ = x[LPMTrie-11]
_ = x[ArrayOfMaps-12] _ = x[ArrayOfMaps-12]
_ = x[HashOfMaps-13] _ = x[HashOfMaps-13]
_ = x[DevMap-14]
_ = x[SockMap-15]
_ = x[CPUMap-16]
_ = x[XSKMap-17]
_ = x[SockHash-18]
_ = x[CGroupStorage-19]
_ = x[ReusePortSockArray-20]
_ = x[PerCPUCGroupStorage-21]
_ = x[Queue-22]
_ = x[Stack-23]
_ = x[SkStorage-24]
_ = x[DevMapHash-25]
} }
const _MapType_name = "UnspecifiedMapHashArrayProgramArrayPerfEventArrayPerCPUHashPerCPUArrayStackTraceCGroupArrayLRUHashLRUCPUHashLPMTrieArrayOfMapsHashOfMaps" const _MapType_name = "UnspecifiedMapHashArrayProgramArrayPerfEventArrayPerCPUHashPerCPUArrayStackTraceCGroupArrayLRUHashLRUCPUHashLPMTrieArrayOfMapsHashOfMapsDevMapSockMapCPUMapXSKMapSockHashCGroupStorageReusePortSockArrayPerCPUCGroupStorageQueueStackSkStorageDevMapHash"
var _MapType_index = [...]uint8{0, 14, 18, 23, 35, 49, 59, 70, 80, 91, 98, 108, 115, 126, 136} var _MapType_index = [...]uint8{0, 14, 18, 23, 35, 49, 59, 70, 80, 91, 98, 108, 115, 126, 136, 142, 149, 155, 161, 169, 182, 200, 219, 224, 229, 238, 248}
func (i MapType) String() string { func (i MapType) String() string {
if i >= MapType(len(_MapType_index)-1) { if i >= MapType(len(_MapType_index)-1) {
@ -64,11 +76,12 @@ func _() {
_ = x[CGroupSysctl-23] _ = x[CGroupSysctl-23]
_ = x[RawTracepointWritable-24] _ = x[RawTracepointWritable-24]
_ = x[CGroupSockopt-25] _ = x[CGroupSockopt-25]
_ = x[Tracing-26]
} }
const _ProgramType_name = "UnspecifiedProgramSocketFilterKprobeSchedCLSSchedACTTracePointXDPPerfEventCGroupSKBCGroupSockLWTInLWTOutLWTXmitSockOpsSkSKBCGroupDeviceSkMsgRawTracepointCGroupSockAddrLWTSeg6LocalLircMode2SkReuseportFlowDissectorCGroupSysctlRawTracepointWritableCGroupSockopt" const _ProgramType_name = "UnspecifiedProgramSocketFilterKprobeSchedCLSSchedACTTracePointXDPPerfEventCGroupSKBCGroupSockLWTInLWTOutLWTXmitSockOpsSkSKBCGroupDeviceSkMsgRawTracepointCGroupSockAddrLWTSeg6LocalLircMode2SkReuseportFlowDissectorCGroupSysctlRawTracepointWritableCGroupSockoptTracing"
var _ProgramType_index = [...]uint16{0, 18, 30, 36, 44, 52, 62, 65, 74, 83, 93, 98, 104, 111, 118, 123, 135, 140, 153, 167, 179, 188, 199, 212, 224, 245, 258} var _ProgramType_index = [...]uint16{0, 18, 30, 36, 44, 52, 62, 65, 74, 83, 93, 98, 104, 111, 118, 123, 135, 140, 153, 167, 179, 188, 199, 212, 224, 245, 258, 265}
func (i ProgramType) String() string { func (i ProgramType) String() string {
if i >= ProgramType(len(_ProgramType_index)-1) { if i >= ProgramType(len(_ProgramType_index)-1) {

View File

@ -90,54 +90,47 @@ func (b *blkioController) Update(path string, resources *specs.LinuxResources) e
func (b *blkioController) Stat(path string, stats *v1.Metrics) error { func (b *blkioController) Stat(path string, stats *v1.Metrics) error {
stats.Blkio = &v1.BlkIOStat{} stats.Blkio = &v1.BlkIOStat{}
settings := []blkioStatSettings{
{ var settings []blkioStatSettings
name: "throttle.io_serviced",
entry: &stats.Blkio.IoServicedRecursive,
},
{
name: "throttle.io_service_bytes",
entry: &stats.Blkio.IoServiceBytesRecursive,
},
}
// Try to read CFQ stats available on all CFQ enabled kernels first // Try to read CFQ stats available on all CFQ enabled kernels first
if _, err := os.Lstat(filepath.Join(b.Path(path), fmt.Sprintf("blkio.io_serviced_recursive"))); err == nil { if _, err := os.Lstat(filepath.Join(b.Path(path), fmt.Sprintf("blkio.io_serviced_recursive"))); err == nil {
settings = []blkioStatSettings{} settings = []blkioStatSettings{
settings = append(settings, {
blkioStatSettings{
name: "sectors_recursive", name: "sectors_recursive",
entry: &stats.Blkio.SectorsRecursive, entry: &stats.Blkio.SectorsRecursive,
}, },
blkioStatSettings{ {
name: "io_service_bytes_recursive", name: "io_service_bytes_recursive",
entry: &stats.Blkio.IoServiceBytesRecursive, entry: &stats.Blkio.IoServiceBytesRecursive,
}, },
blkioStatSettings{ {
name: "io_serviced_recursive", name: "io_serviced_recursive",
entry: &stats.Blkio.IoServicedRecursive, entry: &stats.Blkio.IoServicedRecursive,
}, },
blkioStatSettings{ {
name: "io_queued_recursive", name: "io_queued_recursive",
entry: &stats.Blkio.IoQueuedRecursive, entry: &stats.Blkio.IoQueuedRecursive,
}, },
blkioStatSettings{ {
name: "io_service_time_recursive", name: "io_service_time_recursive",
entry: &stats.Blkio.IoServiceTimeRecursive, entry: &stats.Blkio.IoServiceTimeRecursive,
}, },
blkioStatSettings{ {
name: "io_wait_time_recursive", name: "io_wait_time_recursive",
entry: &stats.Blkio.IoWaitTimeRecursive, entry: &stats.Blkio.IoWaitTimeRecursive,
}, },
blkioStatSettings{ {
name: "io_merged_recursive", name: "io_merged_recursive",
entry: &stats.Blkio.IoMergedRecursive, entry: &stats.Blkio.IoMergedRecursive,
}, },
blkioStatSettings{ {
name: "time_recursive", name: "time_recursive",
entry: &stats.Blkio.IoTimeRecursive, entry: &stats.Blkio.IoTimeRecursive,
}, },
) }
} }
f, err := os.Open(filepath.Join(b.procRoot, "diskstats")) f, err := os.Open(filepath.Join(b.procRoot, "diskstats"))
if err != nil { if err != nil {
return err return err
@ -149,6 +142,29 @@ func (b *blkioController) Stat(path string, stats *v1.Metrics) error {
return err return err
} }
var size int
for _, t := range settings {
if err := b.readEntry(devices, path, t.name, t.entry); err != nil {
return err
}
size += len(*t.entry)
}
if size > 0 {
return nil
}
// Even the kernel is compiled with the CFQ scheduler, the cgroup may not use
// block devices with the CFQ scheduler. If so, we should fallback to throttle.* files.
settings = []blkioStatSettings{
{
name: "throttle.io_serviced",
entry: &stats.Blkio.IoServicedRecursive,
},
{
name: "throttle.io_service_bytes",
entry: &stats.Blkio.IoServiceBytesRecursive,
},
}
for _, t := range settings { for _, t := range settings {
if err := b.readEntry(devices, path, t.name, t.entry); err != nil { if err := b.readEntry(devices, path, t.name, t.entry); err != nil {
return err return err
@ -165,9 +181,6 @@ func (b *blkioController) readEntry(devices map[deviceKey]string, path, name str
defer f.Close() defer f.Close()
sc := bufio.NewScanner(f) sc := bufio.NewScanner(f)
for sc.Scan() { for sc.Scan() {
if err := sc.Err(); err != nil {
return err
}
// format: dev type amount // format: dev type amount
fields := strings.FieldsFunc(sc.Text(), splitBlkIOStatLine) fields := strings.FieldsFunc(sc.Text(), splitBlkIOStatLine)
if len(fields) < 3 { if len(fields) < 3 {
@ -204,7 +217,7 @@ func (b *blkioController) readEntry(devices map[deviceKey]string, path, name str
Value: v, Value: v,
}) })
} }
return nil return sc.Err()
} }
func createBlkioSettings(blkio *specs.LinuxBlockIO) []blkioSettings { func createBlkioSettings(blkio *specs.LinuxBlockIO) []blkioSettings {

View File

@ -110,9 +110,6 @@ func (c *cpuController) Stat(path string, stats *v1.Metrics) error {
// get or create the cpu field because cpuacct can also set values on this struct // get or create the cpu field because cpuacct can also set values on this struct
sc := bufio.NewScanner(f) sc := bufio.NewScanner(f)
for sc.Scan() { for sc.Scan() {
if err := sc.Err(); err != nil {
return err
}
key, v, err := parseKV(sc.Text()) key, v, err := parseKV(sc.Text())
if err != nil { if err != nil {
return err return err
@ -126,5 +123,5 @@ func (c *cpuController) Stat(path string, stats *v1.Metrics) error {
stats.CPU.Throttling.ThrottledTime = v stats.CPU.Throttling.ThrottledTime = v
} }
} }
return nil return sc.Err()
} }

View File

@ -1,16 +1,19 @@
module github.com/containerd/cgroups module github.com/containerd/cgroups
go 1.12 go 1.13
require ( require (
github.com/cilium/ebpf v0.0.0-20191113100448-d9fb101ca1fb github.com/cilium/ebpf v0.0.0-20200110133405-4032b1d8aae3
github.com/coreos/go-systemd/v22 v22.0.0-20191111152658-2d78030078ef github.com/coreos/go-systemd/v22 v22.0.0
github.com/cpuguy83/go-md2man/v2 v2.0.0 // indirect
github.com/docker/go-units v0.4.0 github.com/docker/go-units v0.4.0
github.com/godbus/dbus/v5 v5.0.3 github.com/godbus/dbus/v5 v5.0.3
github.com/gogo/protobuf v1.2.1 github.com/gogo/protobuf v1.3.1
github.com/konsorten/go-windows-terminal-sequences v1.0.2 // indirect
github.com/opencontainers/runtime-spec v0.1.2-0.20190507144316-5b71a03e2700 github.com/opencontainers/runtime-spec v0.1.2-0.20190507144316-5b71a03e2700
github.com/pkg/errors v0.8.1 github.com/pkg/errors v0.9.1
github.com/sirupsen/logrus v1.4.2 github.com/sirupsen/logrus v1.4.2
github.com/urfave/cli v1.22.1 github.com/stretchr/testify v1.2.2
golang.org/x/sys v0.0.0-20191112214154-59a1497f0cea github.com/urfave/cli v1.22.2
golang.org/x/sys v0.0.0-20200120151820-655fe14d7479
) )

View File

@ -236,9 +236,6 @@ func (m *memoryController) parseStats(r io.Reader, stat *v1.MemoryStat) error {
line int line int
) )
for sc.Scan() { for sc.Scan() {
if err := sc.Err(); err != nil {
return err
}
key, v, err := parseKV(sc.Text()) key, v, err := parseKV(sc.Text())
if err != nil { if err != nil {
return fmt.Errorf("%d: %v", line, err) return fmt.Errorf("%d: %v", line, err)
@ -246,6 +243,9 @@ func (m *memoryController) parseStats(r io.Reader, stat *v1.MemoryStat) error {
raw[key] = v raw[key] = v
line++ line++
} }
if err := sc.Err(); err != nil {
return err
}
stat.Cache = raw["cache"] stat.Cache = raw["cache"]
stat.RSS = raw["rss"] stat.RSS = raw["rss"]
stat.RSSHuge = raw["rss_huge"] stat.RSSHuge = raw["rss_huge"]

View File

@ -181,6 +181,10 @@ func readPids(path string, subsystem Name) ([]Process, error) {
}) })
} }
} }
if err := s.Err(); err != nil {
// failed to read all pids?
return nil, err
}
return out, nil return out, nil
} }
@ -208,6 +212,9 @@ func readTasksPids(path string, subsystem Name) ([]Task, error) {
}) })
} }
} }
if err := s.Err(); err != nil {
return nil, err
}
return out, nil return out, nil
} }
@ -286,9 +293,6 @@ func parseCgroupFromReader(r io.Reader) (map[string]string, error) {
s = bufio.NewScanner(r) s = bufio.NewScanner(r)
) )
for s.Scan() { for s.Scan() {
if err := s.Err(); err != nil {
return nil, err
}
var ( var (
text = s.Text() text = s.Text()
parts = strings.SplitN(text, ":", 3) parts = strings.SplitN(text, ":", 3)
@ -302,6 +306,9 @@ func parseCgroupFromReader(r io.Reader) (map[string]string, error) {
} }
} }
} }
if err := s.Err(); err != nil {
return nil, err
}
return cgroups, nil return cgroups, nil
} }
@ -313,16 +320,23 @@ func getCgroupDestination(subsystem string) (string, error) {
defer f.Close() defer f.Close()
s := bufio.NewScanner(f) s := bufio.NewScanner(f)
for s.Scan() { for s.Scan() {
if err := s.Err(); err != nil { fields := strings.Split(s.Text(), " ")
return "", err if len(fields) < 10 {
// broken mountinfo?
continue
}
if fields[len(fields)-3] != "cgroup" {
continue
} }
fields := strings.Fields(s.Text())
for _, opt := range strings.Split(fields[len(fields)-1], ",") { for _, opt := range strings.Split(fields[len(fields)-1], ",") {
if opt == subsystem { if opt == subsystem {
return fields[3], nil return fields[3], nil
} }
} }
} }
if err := s.Err(); err != nil {
return "", err
}
return "", ErrNoCgroupMountDestination return "", ErrNoCgroupMountDestination
} }

View File

@ -54,28 +54,20 @@ func v1MountPoint() (string, error) {
defer f.Close() defer f.Close()
scanner := bufio.NewScanner(f) scanner := bufio.NewScanner(f)
for scanner.Scan() { for scanner.Scan() {
if err := scanner.Err(); err != nil {
return "", err
}
var ( var (
text = scanner.Text() text = scanner.Text()
fields = strings.Split(text, " ") fields = strings.Split(text, " ")
// safe as mountinfo encodes mountpoints with spaces as \040. numFields = len(fields)
index = strings.Index(text, " - ")
postSeparatorFields = strings.Fields(text[index+3:])
numPostFields = len(postSeparatorFields)
) )
// this is an error as we can't detect if the mount is for "cgroup" if numFields < 10 {
if numPostFields == 0 { return "", fmt.Errorf("mountinfo: bad entry %q", text)
return "", fmt.Errorf("Found no fields post '-' in %q", text)
} }
if postSeparatorFields[0] == "cgroup" { if fields[numFields-3] == "cgroup" {
// check that the mount is properly formated.
if numPostFields < 3 {
return "", fmt.Errorf("Error found less than 3 fields post '-' in %q", text)
}
return filepath.Dir(fields[4]), nil return filepath.Dir(fields[4]), nil
} }
} }
if err := scanner.Err(); err != nil {
return "", err
}
return "", ErrMountPointNotExist return "", ErrMountPointNotExist
} }

37
vendor/github.com/containerd/cgroups/v2/hugetlb.go generated vendored Normal file
View File

@ -0,0 +1,37 @@
/*
Copyright The containerd Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v2
import "strings"
type HugeTlb []HugeTlbEntry
type HugeTlbEntry struct {
HugePageSize string
Limit uint64
}
func (r *HugeTlb) Values() (o []Value) {
for _, e := range *r {
o = append(o, Value{
filename: strings.Join([]string{"hugetlb", e.HugePageSize, "max"}, "."),
value: e.Limit,
})
}
return o
}

View File

@ -66,11 +66,12 @@ type Event struct {
// Resources for a cgroups v2 unified hierarchy // Resources for a cgroups v2 unified hierarchy
type Resources struct { type Resources struct {
CPU *CPU CPU *CPU
Memory *Memory Memory *Memory
Pids *Pids Pids *Pids
IO *IO IO *IO
RDMA *RDMA RDMA *RDMA
HugeTlb *HugeTlb
// When len(Devices) is zero, devices are not controlled // When len(Devices) is zero, devices are not controlled
Devices []specs.LinuxDeviceCgroup Devices []specs.LinuxDeviceCgroup
} }
@ -93,6 +94,9 @@ func (r *Resources) Values() (o []Value) {
if r.RDMA != nil { if r.RDMA != nil {
o = append(o, r.RDMA.Values()...) o = append(o, r.RDMA.Values()...)
} }
if r.HugeTlb != nil {
o = append(o, r.HugeTlb.Values()...)
}
return o return o
} }
@ -114,6 +118,9 @@ func (r *Resources) EnabledControllers() (c []string) {
if r.RDMA != nil { if r.RDMA != nil {
c = append(c, "rdma") c = append(c, "rdma")
} }
if r.HugeTlb != nil {
c = append(c, "hugetlb")
}
return return
} }
@ -422,11 +429,11 @@ func (c *Manager) Stat() (*stats.Metrics, error) {
} }
metrics.Io = &stats.IOStat{Usage: readIoStats(c.path)} metrics.Io = &stats.IOStat{Usage: readIoStats(c.path)}
metrics.Rdma = &stats.RdmaStat{ metrics.Rdma = &stats.RdmaStat{
Current: rdmaStats(filepath.Join(c.path, "rdma.current")), Current: rdmaStats(filepath.Join(c.path, "rdma.current")),
Limit: rdmaStats(filepath.Join(c.path, "rdma.max")), Limit: rdmaStats(filepath.Join(c.path, "rdma.max")),
} }
metrics.Hugetlb = readHugeTlbStats(c.path)
return &metrics, nil return &metrics, nil
} }
@ -489,16 +496,13 @@ func readKVStatsFile(path string, file string, out map[string]interface{}) error
s := bufio.NewScanner(f) s := bufio.NewScanner(f)
for s.Scan() { for s.Scan() {
if err := s.Err(); err != nil {
return err
}
name, value, err := parseKV(s.Text()) name, value, err := parseKV(s.Text())
if err != nil { if err != nil {
return errors.Wrapf(err, "error while parsing %s (line=%q)", filepath.Join(path, file), s.Text()) return errors.Wrapf(err, "error while parsing %s (line=%q)", filepath.Join(path, file), s.Text())
} }
out[name] = value out[name] = value
} }
return nil return s.Err()
} }
func (c *Manager) Freeze() error { func (c *Manager) Freeze() error {
@ -526,20 +530,20 @@ func (c *Manager) freeze(path string, state State) error {
} }
} }
func (c *Manager) MemoryEventFD() (uintptr, error) { // MemoryEventFD returns inotify file descriptor and 'memory.events' inotify watch descriptor
func (c *Manager) MemoryEventFD() (int, uint32, error) {
fpath := filepath.Join(c.path, "memory.events") fpath := filepath.Join(c.path, "memory.events")
fd, err := syscall.InotifyInit() fd, err := syscall.InotifyInit()
if err != nil { if err != nil {
return 0, errors.Errorf("Failed to create inotify fd") return 0, 0, errors.Errorf("Failed to create inotify fd")
} }
defer syscall.Close(fd)
wd, err := syscall.InotifyAddWatch(fd, fpath, unix.IN_MODIFY) wd, err := syscall.InotifyAddWatch(fd, fpath, unix.IN_MODIFY)
if wd < 0 { if wd < 0 {
return 0, errors.Errorf("Failed to add inotify watch for %q", fpath) syscall.Close(fd)
return 0, 0, errors.Errorf("Failed to add inotify watch for %q", fpath)
} }
defer syscall.InotifyRmWatch(fd, uint32(wd))
return uintptr(fd), nil return fd, uint32(wd), nil
} }
func (c *Manager) EventChan() (<-chan Event, <-chan error) { func (c *Manager) EventChan() (<-chan Event, <-chan error) {
@ -551,15 +555,22 @@ func (c *Manager) EventChan() (<-chan Event, <-chan error) {
} }
func (c *Manager) waitForEvents(ec chan<- Event, errCh chan<- error) { func (c *Manager) waitForEvents(ec chan<- Event, errCh chan<- error) {
fd, err := c.MemoryEventFD() fd, wd, err := c.MemoryEventFD()
defer syscall.InotifyRmWatch(fd, wd)
defer syscall.Close(fd)
if err != nil { if err != nil {
errCh <- errors.Errorf("Failed to create memory event fd") errCh <- err
return
} }
for { for {
buffer := make([]byte, syscall.SizeofInotifyEvent*10) buffer := make([]byte, syscall.SizeofInotifyEvent*10)
bytesRead, err := syscall.Read(int(fd), buffer) bytesRead, err := syscall.Read(fd, buffer)
if err != nil { if err != nil {
errCh <- err errCh <- err
return
} }
var out map[string]interface{} var out map[string]interface{}
if bytesRead >= syscall.SizeofInotifyEvent { if bytesRead >= syscall.SizeofInotifyEvent {
@ -572,6 +583,9 @@ func (c *Manager) waitForEvents(ec chan<- Event, errCh chan<- error) {
OOMKill: out["oom_kill"].(uint64), OOMKill: out["oom_kill"].(uint64),
} }
ec <- e ec <- e
} else {
errCh <- err
return
} }
} }
} }

View File

@ -25,14 +25,15 @@ var _ = math.Inf
const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package
type Metrics struct { type Metrics struct {
Pids *PidsStat `protobuf:"bytes,1,opt,name=pids,proto3" json:"pids,omitempty"` Pids *PidsStat `protobuf:"bytes,1,opt,name=pids,proto3" json:"pids,omitempty"`
CPU *CPUStat `protobuf:"bytes,2,opt,name=cpu,proto3" json:"cpu,omitempty"` CPU *CPUStat `protobuf:"bytes,2,opt,name=cpu,proto3" json:"cpu,omitempty"`
Memory *MemoryStat `protobuf:"bytes,4,opt,name=memory,proto3" json:"memory,omitempty"` Memory *MemoryStat `protobuf:"bytes,4,opt,name=memory,proto3" json:"memory,omitempty"`
Rdma *RdmaStat `protobuf:"bytes,5,opt,name=rdma,proto3" json:"rdma,omitempty"` Rdma *RdmaStat `protobuf:"bytes,5,opt,name=rdma,proto3" json:"rdma,omitempty"`
Io *IOStat `protobuf:"bytes,6,opt,name=io,proto3" json:"io,omitempty"` Io *IOStat `protobuf:"bytes,6,opt,name=io,proto3" json:"io,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"` Hugetlb []*HugeTlbStat `protobuf:"bytes,7,rep,name=hugetlb,proto3" json:"hugetlb,omitempty"`
XXX_unrecognized []byte `json:"-"` XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_sizecache int32 `json:"-"` XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
} }
func (m *Metrics) Reset() { *m = Metrics{} } func (m *Metrics) Reset() { *m = Metrics{} }
@ -388,6 +389,47 @@ func (m *IOEntry) XXX_DiscardUnknown() {
var xxx_messageInfo_IOEntry proto.InternalMessageInfo var xxx_messageInfo_IOEntry proto.InternalMessageInfo
type HugeTlbStat struct {
Current uint64 `protobuf:"varint,1,opt,name=current,proto3" json:"current,omitempty"`
Max uint64 `protobuf:"varint,2,opt,name=max,proto3" json:"max,omitempty"`
Pagesize string `protobuf:"bytes,3,opt,name=pagesize,proto3" json:"pagesize,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *HugeTlbStat) Reset() { *m = HugeTlbStat{} }
func (*HugeTlbStat) ProtoMessage() {}
func (*HugeTlbStat) Descriptor() ([]byte, []int) {
return fileDescriptor_2fc6005842049e6b, []int{8}
}
func (m *HugeTlbStat) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
}
func (m *HugeTlbStat) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
if deterministic {
return xxx_messageInfo_HugeTlbStat.Marshal(b, m, deterministic)
} else {
b = b[:cap(b)]
n, err := m.MarshalTo(b)
if err != nil {
return nil, err
}
return b[:n], nil
}
}
func (m *HugeTlbStat) XXX_Merge(src proto.Message) {
xxx_messageInfo_HugeTlbStat.Merge(m, src)
}
func (m *HugeTlbStat) XXX_Size() int {
return m.Size()
}
func (m *HugeTlbStat) XXX_DiscardUnknown() {
xxx_messageInfo_HugeTlbStat.DiscardUnknown(m)
}
var xxx_messageInfo_HugeTlbStat proto.InternalMessageInfo
func init() { func init() {
proto.RegisterType((*Metrics)(nil), "io.containerd.cgroups.v2.Metrics") proto.RegisterType((*Metrics)(nil), "io.containerd.cgroups.v2.Metrics")
proto.RegisterType((*PidsStat)(nil), "io.containerd.cgroups.v2.PidsStat") proto.RegisterType((*PidsStat)(nil), "io.containerd.cgroups.v2.PidsStat")
@ -397,6 +439,7 @@ func init() {
proto.RegisterType((*RdmaEntry)(nil), "io.containerd.cgroups.v2.RdmaEntry") proto.RegisterType((*RdmaEntry)(nil), "io.containerd.cgroups.v2.RdmaEntry")
proto.RegisterType((*IOStat)(nil), "io.containerd.cgroups.v2.IOStat") proto.RegisterType((*IOStat)(nil), "io.containerd.cgroups.v2.IOStat")
proto.RegisterType((*IOEntry)(nil), "io.containerd.cgroups.v2.IOEntry") proto.RegisterType((*IOEntry)(nil), "io.containerd.cgroups.v2.IOEntry")
proto.RegisterType((*HugeTlbStat)(nil), "io.containerd.cgroups.v2.HugeTlbStat")
} }
func init() { func init() {
@ -404,74 +447,77 @@ func init() {
} }
var fileDescriptor_2fc6005842049e6b = []byte{ var fileDescriptor_2fc6005842049e6b = []byte{
// 1064 bytes of a gzipped FileDescriptorProto // 1118 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x56, 0x4f, 0x6f, 0x1c, 0xc5, 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x56, 0x4d, 0x6f, 0x1c, 0x45,
0x13, 0xcd, 0xda, 0x9b, 0xfd, 0xd3, 0x6b, 0x27, 0x4e, 0xc7, 0xf1, 0xaf, 0xe3, 0xfc, 0xbc, 0xb6, 0x10, 0xcd, 0xda, 0x9b, 0xfd, 0xe8, 0xb5, 0x13, 0xa7, 0xe3, 0x84, 0x4e, 0x42, 0xd6, 0xf6, 0x86,
0xd7, 0x80, 0x82, 0x04, 0xbb, 0xc8, 0xfc, 0x13, 0x28, 0x1c, 0x1c, 0x43, 0x04, 0x12, 0x26, 0xd6, 0xa0, 0x20, 0xc1, 0x2e, 0x32, 0x5f, 0x02, 0x05, 0x21, 0x27, 0x10, 0x05, 0x09, 0x13, 0x6b, 0xe2,
0x26, 0x16, 0xc7, 0x51, 0xef, 0x4c, 0x7b, 0xb7, 0xed, 0x99, 0xe9, 0x51, 0x77, 0xaf, 0xad, 0xe5, 0x15, 0xc7, 0x51, 0xef, 0x4c, 0x7b, 0x76, 0xec, 0xf9, 0x52, 0x77, 0x8f, 0xcd, 0xe6, 0xc4, 0x81,
0xc4, 0x81, 0x2b, 0xe2, 0x63, 0xf0, 0x55, 0x72, 0x83, 0x23, 0x27, 0x44, 0xfc, 0x49, 0x50, 0x55, 0x2b, 0xe2, 0x6f, 0xe5, 0x06, 0x47, 0x4e, 0x88, 0xf8, 0xc4, 0xcf, 0x40, 0x55, 0xd5, 0xb3, 0x33,
0xf5, 0x78, 0x86, 0x83, 0x03, 0xb7, 0xae, 0xf7, 0x5e, 0xd5, 0x54, 0xbd, 0xd9, 0xae, 0x59, 0xf6, 0x1c, 0x6c, 0xb8, 0x75, 0xbd, 0x7a, 0xaf, 0xa6, 0xfa, 0xf5, 0x76, 0xf5, 0xb2, 0x8f, 0xa3, 0xd8,
0xd1, 0x54, 0xfb, 0xd9, 0x7c, 0x32, 0x8c, 0x4d, 0x36, 0x8a, 0x4d, 0xee, 0xa5, 0xce, 0x95, 0x4d, 0xce, 0xcb, 0xd9, 0x38, 0xc8, 0xd3, 0x49, 0x90, 0x67, 0x56, 0xc6, 0x99, 0xd2, 0xe1, 0x24, 0x88,
0x46, 0xf1, 0xd4, 0x9a, 0x79, 0xe1, 0x46, 0x17, 0xfb, 0x23, 0xe7, 0xa5, 0x77, 0xa3, 0x4c, 0x79, 0x74, 0x5e, 0x16, 0x66, 0x72, 0xba, 0x3b, 0x31, 0x56, 0x5a, 0x33, 0x49, 0x95, 0xd5, 0x71, 0x60,
0xab, 0x63, 0x37, 0x2c, 0xac, 0xf1, 0x86, 0x0b, 0x6d, 0x86, 0x95, 0x7a, 0x18, 0xd4, 0xc3, 0x8b, 0xc6, 0x85, 0xce, 0x6d, 0xce, 0x45, 0x9c, 0x8f, 0x6b, 0xf6, 0xd8, 0xb1, 0xc7, 0xa7, 0xbb, 0x77,
0xfd, 0xcd, 0xf5, 0xa9, 0x99, 0x1a, 0x14, 0x8d, 0xe0, 0x44, 0xfa, 0xc1, 0xaf, 0x4b, 0xac, 0x7d, 0x37, 0xa3, 0x3c, 0xca, 0x91, 0x34, 0x81, 0x15, 0xf1, 0x47, 0x7f, 0xaf, 0xb0, 0xee, 0x3e, 0x55,
0x44, 0x15, 0xf8, 0x27, 0xac, 0x59, 0xe8, 0xc4, 0x89, 0xc6, 0x4e, 0xe3, 0x71, 0x6f, 0x7f, 0x30, 0xe0, 0x9f, 0xb2, 0x76, 0x11, 0x87, 0x46, 0xb4, 0xb6, 0x5b, 0x8f, 0x06, 0xbb, 0xa3, 0xf1, 0x45,
0xbc, 0xa9, 0xd4, 0xf0, 0x58, 0x27, 0xee, 0x85, 0x97, 0x7e, 0x8c, 0x7a, 0xfe, 0x84, 0x2d, 0xc7, 0xa5, 0xc6, 0x07, 0x71, 0x68, 0x5e, 0x5a, 0x69, 0x3d, 0xe4, 0xf3, 0xc7, 0x6c, 0x35, 0x28, 0x4a,
0xc5, 0x5c, 0x2c, 0x61, 0xda, 0xee, 0xcd, 0x69, 0x87, 0xc7, 0x27, 0x90, 0xf5, 0xb4, 0x7d, 0xf5, 0xb1, 0x82, 0xb2, 0x9d, 0x8b, 0x65, 0x4f, 0x0f, 0xa6, 0xa0, 0x7a, 0xd2, 0x3d, 0xff, 0x73, 0x6b,
0xe7, 0xf6, 0xf2, 0xe1, 0xf1, 0xc9, 0x18, 0xd2, 0xf8, 0x13, 0xd6, 0xca, 0x54, 0x66, 0xec, 0x42, 0xf5, 0xe9, 0xc1, 0xd4, 0x03, 0x19, 0x7f, 0xcc, 0x3a, 0xa9, 0x4a, 0x73, 0xbd, 0x10, 0x6d, 0x2c,
0x34, 0xb1, 0xc0, 0x5b, 0x37, 0x17, 0x38, 0x42, 0x1d, 0x3e, 0x39, 0xe4, 0x40, 0xcf, 0x36, 0xc9, 0xf0, 0xce, 0xc5, 0x05, 0xf6, 0x91, 0x87, 0x5f, 0x76, 0x1a, 0xe8, 0x59, 0x87, 0xa9, 0x14, 0x57,
0xa4, 0xb8, 0xfd, 0x6f, 0x3d, 0x8f, 0x93, 0x4c, 0x52, 0xcf, 0xa0, 0xe7, 0x1f, 0xb0, 0x25, 0x6d, 0xff, 0xab, 0x67, 0x2f, 0x4c, 0x25, 0xf5, 0x0c, 0x7c, 0xfe, 0x21, 0x5b, 0x89, 0x73, 0xd1, 0x41,
0x44, 0x0b, 0xb3, 0x76, 0x6e, 0xce, 0xfa, 0xe6, 0x39, 0xe6, 0x2c, 0x69, 0x33, 0xf8, 0x9c, 0x75, 0xd5, 0xf6, 0xc5, 0xaa, 0x6f, 0x5f, 0xa0, 0x66, 0x25, 0xce, 0xf9, 0x57, 0xac, 0x3b, 0x2f, 0x23,
0xca, 0xb9, 0xb9, 0x60, 0xed, 0x78, 0x6e, 0xad, 0xca, 0x3d, 0x9a, 0xd5, 0x1c, 0x97, 0x21, 0x5f, 0x65, 0x93, 0x99, 0xe8, 0x6e, 0xaf, 0x3e, 0x1a, 0xec, 0x3e, 0xbc, 0x58, 0xf6, 0xbc, 0x8c, 0xd4,
0x67, 0xb7, 0x53, 0x9d, 0x69, 0x8f, 0x6e, 0x34, 0xc7, 0x14, 0x0c, 0x7e, 0x6b, 0xb0, 0x76, 0x98, 0x61, 0x32, 0x43, 0x6d, 0xa5, 0x1a, 0x7d, 0xc1, 0x7a, 0x95, 0x71, 0x5c, 0xb0, 0x6e, 0x50, 0x6a,
0x9e, 0x6f, 0x31, 0x36, 0x77, 0x72, 0xaa, 0xa2, 0xb9, 0x53, 0x71, 0x48, 0xef, 0x22, 0x72, 0xe2, 0xad, 0x32, 0x8b, 0x6e, 0xb7, 0xbd, 0x2a, 0xe4, 0x9b, 0xec, 0x6a, 0x12, 0xa7, 0xb1, 0x45, 0x3b,
0x54, 0xcc, 0x1f, 0xb1, 0xee, 0xdc, 0x29, 0x4b, 0x2c, 0x15, 0xe9, 0x00, 0x80, 0xe4, 0x36, 0xeb, 0xdb, 0x1e, 0x05, 0xa3, 0xdf, 0x5a, 0xac, 0xeb, 0xec, 0xe3, 0xf7, 0x19, 0x2b, 0x8d, 0x8c, 0x94,
0xb9, 0x85, 0xf3, 0x2a, 0x23, 0x7a, 0x19, 0x69, 0x46, 0x10, 0x0a, 0xb6, 0x18, 0xcb, 0x6d, 0x54, 0x5f, 0x1a, 0x15, 0x38, 0x79, 0x1f, 0x91, 0xa9, 0x51, 0x01, 0xbf, 0xc7, 0xfa, 0xa5, 0x51, 0x9a,
0x28, 0xab, 0x4d, 0xe2, 0xd0, 0xd0, 0xe6, 0xb8, 0x9b, 0xdb, 0x63, 0x02, 0xf8, 0x2e, 0x5b, 0xc9, 0xb2, 0x54, 0xa4, 0x07, 0x00, 0x26, 0xb7, 0xd8, 0xc0, 0x2c, 0x8c, 0x55, 0x29, 0xa5, 0x57, 0x31,
0x6d, 0xe4, 0x67, 0xd6, 0x78, 0x9f, 0xaa, 0x04, 0x5d, 0x6b, 0x8e, 0x7b, 0xb9, 0x7d, 0x59, 0x42, 0xcd, 0x08, 0x42, 0xc2, 0x7d, 0xc6, 0x32, 0xed, 0x17, 0x4a, 0xc7, 0x79, 0x68, 0xf0, 0x44, 0xda,
0xfc, 0x6d, 0x76, 0xe7, 0x9a, 0xa7, 0xa7, 0xb4, 0x50, 0xb4, 0x7a, 0x8d, 0xc2, 0x83, 0x06, 0xbf, 0x5e, 0x3f, 0xd3, 0x07, 0x04, 0xf0, 0x1d, 0xb6, 0x96, 0x69, 0xdf, 0xce, 0x75, 0x6e, 0x6d, 0xa2,
0x74, 0x19, 0xab, 0x5e, 0x07, 0xe7, 0xac, 0x29, 0x73, 0x93, 0x87, 0x71, 0xf0, 0x0c, 0xd8, 0xa9, 0x42, 0xb4, 0xbd, 0xed, 0x0d, 0x32, 0x7d, 0x58, 0x41, 0xfc, 0x21, 0xbb, 0xb6, 0xcc, 0xd3, 0x57,
0x4e, 0x55, 0x18, 0x02, 0xcf, 0xd0, 0xc0, 0xb9, 0xb2, 0xb9, 0x4a, 0x23, 0xe7, 0x65, 0x7c, 0x1e, 0x3a, 0x48, 0x5a, 0x5f, 0xa2, 0xf0, 0xa1, 0xd1, 0xaf, 0x7d, 0xc6, 0xea, 0xf3, 0xe4, 0x9c, 0xb5,
0x26, 0xe8, 0x11, 0xf6, 0x02, 0x20, 0x48, 0x73, 0xa9, 0x9c, 0x84, 0xe6, 0xf1, 0x8c, 0x98, 0x89, 0x65, 0x96, 0x67, 0x6e, 0x3b, 0xb8, 0x06, 0xec, 0x28, 0x4e, 0x94, 0xdb, 0x04, 0xae, 0xa1, 0x81,
0xcf, 0x43, 0xbf, 0x78, 0x06, 0xa7, 0xdd, 0x2c, 0x53, 0x59, 0xe8, 0x8f, 0x02, 0x70, 0x08, 0x1e, 0x13, 0xa5, 0x33, 0x95, 0xf8, 0xc6, 0xca, 0xe0, 0xc4, 0xed, 0x60, 0x40, 0xd8, 0x4b, 0x80, 0x40,
0x14, 0x65, 0xb2, 0x28, 0x54, 0x22, 0xda, 0xe4, 0x10, 0x40, 0x47, 0x88, 0x80, 0x43, 0x28, 0x48, 0x66, 0x12, 0x39, 0x73, 0xcd, 0xe3, 0x1a, 0xb1, 0x3c, 0x38, 0x71, 0xfd, 0xe2, 0x1a, 0x9c, 0x36,
0xb4, 0xf5, 0x0b, 0xd1, 0x21, 0x87, 0x00, 0xf9, 0x12, 0x00, 0x18, 0x1f, 0xe9, 0x4b, 0xab, 0xbd, 0xf3, 0x54, 0xa5, 0xae, 0x3f, 0x0a, 0xc0, 0x21, 0xf8, 0x90, 0x9f, 0xca, 0xa2, 0x50, 0xa1, 0xe8,
0x9a, 0x40, 0x8b, 0x5d, 0x1a, 0x1f, 0xd0, 0xef, 0x4b, 0x90, 0x3f, 0x64, 0x1d, 0x98, 0x31, 0xf2, 0x92, 0x43, 0x00, 0xed, 0x23, 0x02, 0x0e, 0x21, 0x21, 0x8c, 0xb5, 0x5d, 0x88, 0x1e, 0x39, 0x04,
0xb3, 0x42, 0x30, 0xfa, 0x05, 0x40, 0xfc, 0x72, 0x56, 0xf0, 0x3d, 0xb6, 0xaa, 0x73, 0x19, 0x7b, 0xc8, 0xd7, 0x00, 0xc0, 0xf6, 0x31, 0x7d, 0xa6, 0x63, 0xab, 0x66, 0xd0, 0x62, 0x9f, 0xb6, 0x0f,
0x7d, 0xa1, 0x22, 0xf4, 0xa4, 0x87, 0xfc, 0x4a, 0x09, 0x1e, 0x80, 0x37, 0xdb, 0xac, 0x57, 0x97, 0xe8, 0x0f, 0x15, 0xc8, 0xef, 0xb0, 0x1e, 0xec, 0xd1, 0xb7, 0xf3, 0x42, 0x30, 0xfa, 0x05, 0x40,
0xac, 0x50, 0x9b, 0x35, 0x41, 0xbd, 0x0a, 0xba, 0xb8, 0xfa, 0xcf, 0x2a, 0xcf, 0xc0, 0xcd, 0xaa, 0x7c, 0x38, 0x2f, 0xf8, 0x03, 0xb6, 0x1e, 0x67, 0x32, 0xb0, 0xf1, 0xa9, 0xf2, 0xd1, 0x93, 0x01,
0x0a, 0x4a, 0xee, 0xd4, 0xab, 0xa0, 0x60, 0x87, 0xf5, 0xe6, 0xb9, 0xba, 0xd0, 0xb1, 0x97, 0x93, 0xe6, 0xd7, 0x2a, 0x70, 0x0f, 0xbc, 0xd9, 0x62, 0x83, 0x26, 0x65, 0x8d, 0xda, 0x6c, 0x10, 0x9a,
0x54, 0x89, 0xbb, 0xe4, 0x76, 0x0d, 0xe2, 0xef, 0xb2, 0x35, 0x70, 0x38, 0xb2, 0x2a, 0x4e, 0xa5, 0x55, 0xd0, 0xc5, 0xf5, 0x7f, 0x57, 0x79, 0x06, 0x6e, 0xd6, 0x55, 0x90, 0x72, 0xad, 0x59, 0x05,
0xce, 0x50, 0xb6, 0x86, 0xb2, 0xbb, 0x80, 0x8f, 0x2b, 0x98, 0xbf, 0xcf, 0x38, 0x4a, 0xe7, 0x79, 0x09, 0xdb, 0x6c, 0x50, 0x66, 0xea, 0x34, 0x0e, 0xac, 0x9c, 0x25, 0x4a, 0x5c, 0x27, 0xb7, 0x1b,
0x5d, 0x7c, 0x0f, 0xc5, 0xf7, 0x80, 0x39, 0xa9, 0x13, 0x70, 0x47, 0x8a, 0xe9, 0xa9, 0x9c, 0xa7, 0x10, 0x7f, 0x8f, 0x6d, 0x80, 0xc3, 0xbe, 0x56, 0x41, 0x22, 0xe3, 0x14, 0x69, 0x1b, 0x48, 0xbb,
0x5e, 0x70, 0x72, 0x28, 0x84, 0xbc, 0xcf, 0x58, 0x31, 0xcd, 0xe4, 0x19, 0x91, 0xf7, 0xa9, 0xeb, 0x0e, 0xb8, 0x57, 0xc3, 0xfc, 0x03, 0xc6, 0x91, 0x5a, 0x66, 0x4d, 0xf2, 0x0d, 0x24, 0xdf, 0x80,
0x0a, 0x81, 0x07, 0x5d, 0x1a, 0x7b, 0xae, 0xf3, 0xa9, 0x53, 0x3e, 0xb2, 0x8a, 0x74, 0xeb, 0xf4, 0xcc, 0xb4, 0x99, 0x80, 0x3b, 0x52, 0x44, 0x47, 0xb2, 0x4c, 0xac, 0xe0, 0xe4, 0x90, 0x0b, 0xf9,
0xa0, 0x8a, 0x19, 0x13, 0xc1, 0x47, 0xec, 0x7e, 0x4d, 0x8e, 0xd3, 0x4b, 0xaf, 0xc4, 0x03, 0xd4, 0x90, 0xb1, 0x22, 0x4a, 0xe5, 0x31, 0x25, 0x6f, 0x52, 0xd7, 0x35, 0x02, 0x1f, 0x3a, 0xcb, 0xf5,
0xd7, 0x2a, 0x1d, 0x04, 0x86, 0x7f, 0xcc, 0x36, 0x6a, 0x09, 0xb9, 0x49, 0x54, 0xe8, 0x5b, 0x6c, 0x49, 0x9c, 0x45, 0x46, 0x59, 0x5f, 0x2b, 0xe2, 0x6d, 0xd2, 0x87, 0xea, 0x8c, 0x47, 0x09, 0x3e,
0x60, 0xce, 0x83, 0x8a, 0xfd, 0xae, 0x22, 0xf9, 0x26, 0xeb, 0x14, 0x53, 0xab, 0x4e, 0x75, 0x9a, 0x61, 0x37, 0x1b, 0x74, 0xdc, 0xbd, 0xb4, 0x4a, 0xdc, 0x42, 0x7e, 0xa3, 0xd2, 0x9e, 0xcb, 0xf0,
0x8a, 0xff, 0xd1, 0xc5, 0x2c, 0x63, 0xbe, 0xc1, 0x5a, 0xc5, 0xd4, 0xc5, 0x32, 0x17, 0x02, 0x99, 0x4f, 0xd8, 0xed, 0x86, 0x20, 0xcb, 0x43, 0xe5, 0xfa, 0x16, 0xb7, 0x51, 0x73, 0xab, 0xce, 0x7e,
0x10, 0x91, 0x09, 0xce, 0x2b, 0x99, 0x8a, 0x87, 0xa5, 0x09, 0x18, 0x92, 0x09, 0xd7, 0xcd, 0x6e, 0x5f, 0x27, 0xf9, 0x5d, 0xd6, 0x2b, 0x22, 0xad, 0x8e, 0xe2, 0x24, 0x11, 0x6f, 0xd1, 0xc5, 0xac,
0x96, 0x26, 0x94, 0x08, 0x1f, 0xb0, 0x95, 0x62, 0x9a, 0xa8, 0x6b, 0xc5, 0x23, 0x7a, 0xff, 0x75, 0x62, 0x7e, 0x9b, 0x75, 0x8a, 0xc8, 0x04, 0x32, 0x13, 0x02, 0x33, 0x2e, 0x22, 0x13, 0x8c, 0x55,
0x8c, 0x6a, 0xa4, 0xf2, 0x87, 0xc5, 0xa9, 0x55, 0x4a, 0xfc, 0xbf, 0xac, 0x51, 0x22, 0xf0, 0xfa, 0x32, 0x11, 0x77, 0x2a, 0x13, 0x30, 0x24, 0x13, 0x96, 0xcd, 0xde, 0xad, 0x4c, 0xa8, 0x10, 0x3e,
0xab, 0x28, 0x11, 0x5b, 0xf4, 0xfa, 0x6b, 0x10, 0x7f, 0x87, 0xdd, 0xf5, 0xb3, 0x22, 0x42, 0x23, 0x62, 0x6b, 0x45, 0x14, 0xaa, 0x25, 0xe3, 0x1e, 0x9d, 0x7f, 0x13, 0xa3, 0x1a, 0x89, 0x7c, 0xb5,
0x23, 0x99, 0xa6, 0x26, 0x16, 0xfd, 0xf2, 0xba, 0x17, 0xcf, 0x00, 0x3d, 0x00, 0x90, 0xbf, 0xc7, 0x38, 0xd2, 0x4a, 0x89, 0xb7, 0xab, 0x1a, 0x15, 0x02, 0xc7, 0x5f, 0x47, 0xa1, 0xb8, 0x4f, 0xc7,
0x38, 0xe8, 0x62, 0x93, 0xa6, 0xb2, 0x70, 0x2a, 0x48, 0xb7, 0x51, 0xba, 0xe6, 0x67, 0xc5, 0x61, 0xdf, 0x80, 0xf8, 0xbb, 0xec, 0xba, 0x9d, 0x17, 0x3e, 0x1a, 0xe9, 0xcb, 0x24, 0xc9, 0x03, 0x31,
0x20, 0x48, 0xbd, 0xce, 0x6e, 0xe3, 0x42, 0x13, 0x3b, 0x74, 0x35, 0x31, 0x80, 0x5f, 0x2b, 0x2d, 0xac, 0xae, 0x7b, 0xf1, 0x0c, 0xd0, 0x3d, 0x00, 0xf9, 0xfb, 0x8c, 0x03, 0x2f, 0xc8, 0x93, 0x44,
0x3e, 0x5a, 0x90, 0xbb, 0xd4, 0x2e, 0x42, 0xdf, 0x02, 0x02, 0x57, 0xd3, 0x5d, 0xca, 0x22, 0xa2, 0x16, 0x46, 0x39, 0xea, 0x16, 0x52, 0x37, 0xec, 0xbc, 0x78, 0xea, 0x12, 0xc4, 0xde, 0x64, 0x57,
0xdc, 0x01, 0x5d, 0x4d, 0x40, 0x4e, 0x30, 0xbf, 0xa4, 0x29, 0x7d, 0xaf, 0xa2, 0x31, 0x7b, 0xf0, 0x71, 0xa0, 0x89, 0x6d, 0xba, 0x9a, 0x18, 0xc0, 0xaf, 0x95, 0x06, 0x1f, 0x0d, 0xc8, 0x1d, 0x6a,
0x53, 0x83, 0x75, 0xca, 0x25, 0xcf, 0xbf, 0xa8, 0x2f, 0xe8, 0xe5, 0xc7, 0xbd, 0xfd, 0xbd, 0x37, 0x17, 0xa1, 0xef, 0x00, 0x81, 0xab, 0x69, 0xce, 0x64, 0xe1, 0x93, 0x76, 0x44, 0x57, 0x13, 0x90,
0x7f, 0x19, 0xbe, 0xca, 0xbd, 0x5d, 0x54, 0x5b, 0xfc, 0xb3, 0x6a, 0x8b, 0xff, 0xe7, 0xe4, 0xb0, 0x29, 0xea, 0xab, 0x34, 0xc9, 0x1f, 0xd4, 0x69, 0x54, 0x8f, 0x7e, 0x6e, 0xb1, 0x5e, 0xf5, 0x4a,
0xea, 0x15, 0xeb, 0x5e, 0x63, 0xf0, 0xb3, 0x48, 0xe0, 0xae, 0x29, 0x5c, 0x8c, 0xdd, 0x71, 0x88, 0xf0, 0x2f, 0x9b, 0x03, 0x1a, 0xa6, 0xfd, 0x83, 0xcb, 0x9f, 0x96, 0x6f, 0x32, 0xab, 0x17, 0xf5,
0xc0, 0x8a, 0x59, 0x2c, 0xa3, 0x99, 0xcc, 0x93, 0x54, 0x39, 0xdc, 0x90, 0xab, 0x63, 0x36, 0x8b, 0x14, 0xff, 0xbc, 0x9e, 0xe2, 0xff, 0x5b, 0xec, 0x46, 0xbd, 0x62, 0xfd, 0x25, 0x06, 0x3f, 0x8b,
0xe5, 0xd7, 0x84, 0x94, 0x02, 0x33, 0x39, 0x53, 0xb1, 0x77, 0xb8, 0x26, 0x49, 0xf0, 0x9c, 0x90, 0x10, 0xee, 0x9a, 0xc2, 0xc1, 0xd8, 0xf7, 0x5c, 0x04, 0x56, 0xcc, 0x03, 0xe9, 0xcf, 0x65, 0x16,
0xc1, 0x01, 0x6b, 0xd1, 0xb7, 0x89, 0x7f, 0x5a, 0x9a, 0x4d, 0x83, 0xee, 0xbe, 0xe9, 0x63, 0x16, 0x26, 0xca, 0xe0, 0x84, 0x5c, 0xf7, 0xd8, 0x3c, 0x90, 0xcf, 0x09, 0xa9, 0x08, 0xf9, 0xec, 0x58,
0x3a, 0x45, 0xfd, 0xe0, 0xe7, 0x06, 0x6b, 0x07, 0x08, 0xde, 0x58, 0x26, 0xcf, 0x8c, 0x0d, 0x0b, 0x05, 0xd6, 0xe0, 0x98, 0x24, 0xc2, 0x0b, 0x42, 0x46, 0x7b, 0xac, 0x43, 0x8f, 0x1b, 0xff, 0xac,
0x9c, 0x02, 0x44, 0x75, 0x6e, 0x6c, 0xf9, 0x31, 0xc3, 0x00, 0x86, 0xb2, 0x93, 0x85, 0x57, 0x2e, 0x32, 0x9b, 0x36, 0xba, 0x73, 0xd9, 0x6b, 0xe8, 0x3a, 0x45, 0xfe, 0xe8, 0x97, 0x16, 0xeb, 0x3a,
0x6c, 0xef, 0x10, 0x01, 0x7e, 0x49, 0x38, 0xad, 0xee, 0x10, 0xc1, 0xf2, 0xb6, 0xda, 0xb8, 0x72, 0x08, 0x4e, 0x2c, 0x95, 0xc7, 0xb9, 0x76, 0x03, 0x9c, 0x02, 0x44, 0xe3, 0x2c, 0xd7, 0xd5, 0x63,
0x79, 0xc3, 0x19, 0xb0, 0x4b, 0xc0, 0x68, 0x77, 0xe3, 0xf9, 0xa9, 0x78, 0xf5, 0xba, 0x7f, 0xeb, 0x86, 0x01, 0x6c, 0x4a, 0xcf, 0x16, 0x56, 0x19, 0x37, 0xbd, 0x5d, 0x04, 0xf8, 0x19, 0xe1, 0x34,
0x8f, 0xd7, 0xfd, 0x5b, 0x3f, 0x5e, 0xf5, 0x1b, 0xaf, 0xae, 0xfa, 0x8d, 0xdf, 0xaf, 0xfa, 0x8d, 0xba, 0x5d, 0x04, 0xc3, 0x5b, 0xc7, 0xb9, 0xa9, 0x86, 0x37, 0xac, 0x01, 0x3b, 0x03, 0x8c, 0x66,
0xbf, 0xae, 0xfa, 0x8d, 0x49, 0x0b, 0xff, 0xab, 0x7c, 0xf8, 0x77, 0x00, 0x00, 0x00, 0xff, 0xff, 0x37, 0xae, 0x47, 0x53, 0x36, 0x68, 0x3c, 0xbc, 0x97, 0xbc, 0xb1, 0x1b, 0x6c, 0x35, 0x95, 0x3f,
0xf0, 0xd3, 0x07, 0x15, 0x13, 0x09, 0x00, 0x00, 0xba, 0xa6, 0x60, 0x89, 0x57, 0x53, 0x46, 0xca, 0xc4, 0xaf, 0x14, 0x36, 0xd5, 0xf7, 0x96, 0xf1,
0x13, 0xf1, 0xfa, 0xcd, 0xf0, 0xca, 0x1f, 0x6f, 0x86, 0x57, 0x7e, 0x3a, 0x1f, 0xb6, 0x5e, 0x9f,
0x0f, 0x5b, 0xbf, 0x9f, 0x0f, 0x5b, 0x7f, 0x9d, 0x0f, 0x5b, 0xb3, 0x0e, 0xfe, 0x87, 0xfa, 0xe8,
0x9f, 0x00, 0x00, 0x00, 0xff, 0xff, 0x79, 0xd2, 0xcd, 0xe2, 0xab, 0x09, 0x00, 0x00,
} }
func (m *Metrics) Marshal() (dAtA []byte, err error) { func (m *Metrics) Marshal() (dAtA []byte, err error) {
@ -539,6 +585,18 @@ func (m *Metrics) MarshalTo(dAtA []byte) (int, error) {
} }
i += n5 i += n5
} }
if len(m.Hugetlb) > 0 {
for _, msg := range m.Hugetlb {
dAtA[i] = 0x3a
i++
i = encodeVarintMetrics(dAtA, i, uint64(msg.Size()))
n, err := msg.MarshalTo(dAtA[i:])
if err != nil {
return 0, err
}
i += n
}
}
if m.XXX_unrecognized != nil { if m.XXX_unrecognized != nil {
i += copy(dAtA[i:], m.XXX_unrecognized) i += copy(dAtA[i:], m.XXX_unrecognized)
} }
@ -1029,6 +1087,43 @@ func (m *IOEntry) MarshalTo(dAtA []byte) (int, error) {
return i, nil return i, nil
} }
// Marshal encodes m into a freshly allocated protobuf wire-format buffer.
func (m *HugeTlbStat) Marshal() (dAtA []byte, err error) {
	buf := make([]byte, m.Size())
	n, err := m.MarshalTo(buf)
	if err != nil {
		return nil, err
	}
	return buf[:n], nil
}
// MarshalTo writes m's protobuf wire-format encoding into dAtA, which must
// be at least m.Size() bytes, and returns the number of bytes written.
// Fields with zero values are omitted, per proto3 semantics.
func (m *HugeTlbStat) MarshalTo(dAtA []byte) (int, error) {
	var i int
	_ = i
	var l int
	_ = l
	if m.Current != 0 {
		// Field 1, varint: tag byte (1<<3)|0 = 0x8.
		dAtA[i] = 0x8
		i++
		i = encodeVarintMetrics(dAtA, i, uint64(m.Current))
	}
	if m.Max != 0 {
		// Field 2, varint: tag byte (2<<3)|0 = 0x10.
		dAtA[i] = 0x10
		i++
		i = encodeVarintMetrics(dAtA, i, uint64(m.Max))
	}
	if len(m.Pagesize) > 0 {
		// Field 3, length-delimited: tag byte (3<<3)|2 = 0x1a,
		// followed by the string length and its bytes.
		dAtA[i] = 0x1a
		i++
		i = encodeVarintMetrics(dAtA, i, uint64(len(m.Pagesize)))
		i += copy(dAtA[i:], m.Pagesize)
	}
	if m.XXX_unrecognized != nil {
		// Re-emit any unknown fields captured during a previous unmarshal.
		i += copy(dAtA[i:], m.XXX_unrecognized)
	}
	return i, nil
}
func encodeVarintMetrics(dAtA []byte, offset int, v uint64) int { func encodeVarintMetrics(dAtA []byte, offset int, v uint64) int {
for v >= 1<<7 { for v >= 1<<7 {
dAtA[offset] = uint8(v&0x7f | 0x80) dAtA[offset] = uint8(v&0x7f | 0x80)
@ -1064,6 +1159,12 @@ func (m *Metrics) Size() (n int) {
l = m.Io.Size() l = m.Io.Size()
n += 1 + l + sovMetrics(uint64(l)) n += 1 + l + sovMetrics(uint64(l))
} }
if len(m.Hugetlb) > 0 {
for _, e := range m.Hugetlb {
l = e.Size()
n += 1 + l + sovMetrics(uint64(l))
}
}
if m.XXX_unrecognized != nil { if m.XXX_unrecognized != nil {
n += len(m.XXX_unrecognized) n += len(m.XXX_unrecognized)
} }
@ -1329,6 +1430,28 @@ func (m *IOEntry) Size() (n int) {
return n return n
} }
// Size reports the number of bytes m occupies in protobuf wire format,
// counting one tag byte plus payload for each non-zero field.
func (m *HugeTlbStat) Size() (n int) {
	if m == nil {
		return 0
	}
	if m.Current != 0 {
		n += 1 + sovMetrics(uint64(m.Current))
	}
	if m.Max != 0 {
		n += 1 + sovMetrics(uint64(m.Max))
	}
	if l := len(m.Pagesize); l > 0 {
		n += 1 + l + sovMetrics(uint64(l))
	}
	// Unknown fields are stored pre-encoded; a nil slice adds nothing.
	n += len(m.XXX_unrecognized)
	return n
}
func sovMetrics(x uint64) (n int) { func sovMetrics(x uint64) (n int) {
for { for {
n++ n++
@ -1352,6 +1475,7 @@ func (this *Metrics) String() string {
`Memory:` + strings.Replace(fmt.Sprintf("%v", this.Memory), "MemoryStat", "MemoryStat", 1) + `,`, `Memory:` + strings.Replace(fmt.Sprintf("%v", this.Memory), "MemoryStat", "MemoryStat", 1) + `,`,
`Rdma:` + strings.Replace(fmt.Sprintf("%v", this.Rdma), "RdmaStat", "RdmaStat", 1) + `,`, `Rdma:` + strings.Replace(fmt.Sprintf("%v", this.Rdma), "RdmaStat", "RdmaStat", 1) + `,`,
`Io:` + strings.Replace(fmt.Sprintf("%v", this.Io), "IOStat", "IOStat", 1) + `,`, `Io:` + strings.Replace(fmt.Sprintf("%v", this.Io), "IOStat", "IOStat", 1) + `,`,
`Hugetlb:` + strings.Replace(fmt.Sprintf("%v", this.Hugetlb), "HugeTlbStat", "HugeTlbStat", 1) + `,`,
`XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`,
`}`, `}`,
}, "") }, "")
@ -1482,6 +1606,19 @@ func (this *IOEntry) String() string {
}, "") }, "")
return s return s
} }
// String renders m in the generated gogoproto debug format, e.g.
// "&HugeTlbStat{Current:1,Max:2,Pagesize:2MB,XXX_unrecognized:[],}".
func (this *HugeTlbStat) String() string {
	if this == nil {
		return "nil"
	}
	return fmt.Sprintf("&HugeTlbStat{Current:%v,Max:%v,Pagesize:%v,XXX_unrecognized:%v,}",
		this.Current, this.Max, this.Pagesize, this.XXX_unrecognized)
}
func valueToStringMetrics(v interface{}) string { func valueToStringMetrics(v interface{}) string {
rv := reflect.ValueOf(v) rv := reflect.ValueOf(v)
if rv.IsNil() { if rv.IsNil() {
@ -1699,6 +1836,40 @@ func (m *Metrics) Unmarshal(dAtA []byte) error {
return err return err
} }
iNdEx = postIndex iNdEx = postIndex
case 7:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field Hugetlb", wireType)
}
var msglen int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowMetrics
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
msglen |= int(b&0x7F) << shift
if b < 0x80 {
break
}
}
if msglen < 0 {
return ErrInvalidLengthMetrics
}
postIndex := iNdEx + msglen
if postIndex < 0 {
return ErrInvalidLengthMetrics
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
m.Hugetlb = append(m.Hugetlb, &HugeTlbStat{})
if err := m.Hugetlb[len(m.Hugetlb)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
return err
}
iNdEx = postIndex
default: default:
iNdEx = preIndex iNdEx = preIndex
skippy, err := skipMetrics(dAtA[iNdEx:]) skippy, err := skipMetrics(dAtA[iNdEx:])
@ -3205,6 +3376,130 @@ func (m *IOEntry) Unmarshal(dAtA []byte) error {
} }
return nil return nil
} }
// Unmarshal decodes the protobuf wire-format bytes in dAtA into m.
// Recognized fields (current=1, max=2, pagesize=3) are stored directly;
// any other field is skipped and its raw bytes appended to
// m.XXX_unrecognized so they survive a re-marshal.
func (m *HugeTlbStat) Unmarshal(dAtA []byte) error {
	l := len(dAtA)
	iNdEx := 0
	for iNdEx < l {
		preIndex := iNdEx
		// Read the field tag (field number << 3 | wire type) as a varint.
		var wire uint64
		for shift := uint(0); ; shift += 7 {
			if shift >= 64 {
				return ErrIntOverflowMetrics
			}
			if iNdEx >= l {
				return io.ErrUnexpectedEOF
			}
			b := dAtA[iNdEx]
			iNdEx++
			wire |= uint64(b&0x7F) << shift
			if b < 0x80 {
				break
			}
		}
		fieldNum := int32(wire >> 3)
		wireType := int(wire & 0x7)
		if wireType == 4 {
			// Wire type 4 is "end group", which is invalid outside a group.
			return fmt.Errorf("proto: HugeTlbStat: wiretype end group for non-group")
		}
		if fieldNum <= 0 {
			return fmt.Errorf("proto: HugeTlbStat: illegal tag %d (wire type %d)", fieldNum, wire)
		}
		switch fieldNum {
		case 1:
			// current: varint-encoded uint64.
			if wireType != 0 {
				return fmt.Errorf("proto: wrong wireType = %d for field Current", wireType)
			}
			m.Current = 0
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowMetrics
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				m.Current |= uint64(b&0x7F) << shift
				if b < 0x80 {
					break
				}
			}
		case 2:
			// max: varint-encoded uint64.
			if wireType != 0 {
				return fmt.Errorf("proto: wrong wireType = %d for field Max", wireType)
			}
			m.Max = 0
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowMetrics
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				m.Max |= uint64(b&0x7F) << shift
				if b < 0x80 {
					break
				}
			}
		case 3:
			// pagesize: length-delimited string.
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field Pagesize", wireType)
			}
			var stringLen uint64
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowMetrics
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				stringLen |= uint64(b&0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			intStringLen := int(stringLen)
			if intStringLen < 0 {
				// Length overflowed int: reject rather than mis-slice.
				return ErrInvalidLengthMetrics
			}
			postIndex := iNdEx + intStringLen
			if postIndex < 0 {
				return ErrInvalidLengthMetrics
			}
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			m.Pagesize = string(dAtA[iNdEx:postIndex])
			iNdEx = postIndex
		default:
			// Unknown field: rewind to the tag, skip the whole field, and
			// stash its raw bytes in XXX_unrecognized.
			iNdEx = preIndex
			skippy, err := skipMetrics(dAtA[iNdEx:])
			if err != nil {
				return err
			}
			if skippy < 0 {
				return ErrInvalidLengthMetrics
			}
			if (iNdEx + skippy) < 0 {
				return ErrInvalidLengthMetrics
			}
			if (iNdEx + skippy) > l {
				return io.ErrUnexpectedEOF
			}
			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
			iNdEx += skippy
		}
	}

	if iNdEx > l {
		return io.ErrUnexpectedEOF
	}
	return nil
}
func skipMetrics(dAtA []byte) (n int, err error) { func skipMetrics(dAtA []byte) (n int, err error) {
l := len(dAtA) l := len(dAtA)
iNdEx := 0 iNdEx := 0

View File

@ -10,6 +10,7 @@ message Metrics {
MemoryStat memory = 4; MemoryStat memory = 4;
RdmaStat rdma = 5; RdmaStat rdma = 5;
IOStat io = 6; IOStat io = 6;
repeated HugeTlbStat hugetlb = 7;
} }
message PidsStat { message PidsStat {
@ -87,3 +88,9 @@ message IOEntry {
uint64 rios = 5; uint64 rios = 5;
uint64 wios = 6; uint64 wios = 6;
} }
// HugeTlbStat reports hugetlb usage for a single huge-page size, as read
// from the cgroup v2 hugetlb.<pagesize>.{current,max} interface files.
message HugeTlbStat {
	// current usage, as read from hugetlb.<pagesize>.current.
	uint64 current = 1;
	// usage limit; the literal "max" (unlimited) is reported as 2^64-1.
	uint64 max = 2;
	// huge-page size this entry describes (e.g. "2MB") — taken from the
	// middle component of the interface file name.
	string pagesize = 3;
}

View File

@ -85,6 +85,9 @@ func parseCgroupProcsFile(path string) ([]uint64, error) {
out = append(out, pid) out = append(out, pid)
} }
} }
if err := s.Err(); err != nil {
return nil, err
}
return out, nil return out, nil
} }
@ -144,9 +147,6 @@ func parseCgroupFromReader(r io.Reader) (string, error) {
s = bufio.NewScanner(r) s = bufio.NewScanner(r)
) )
for s.Scan() { for s.Scan() {
if err := s.Err(); err != nil {
return "", err
}
var ( var (
text = s.Text() text = s.Text()
parts = strings.SplitN(text, ":", 3) parts = strings.SplitN(text, ":", 3)
@ -159,6 +159,9 @@ func parseCgroupFromReader(r io.Reader) (string, error) {
return parts[2], nil return parts[2], nil
} }
} }
if err := s.Err(); err != nil {
return "", err
}
return "", fmt.Errorf("cgroup path not found") return "", fmt.Errorf("cgroup path not found")
} }
@ -194,6 +197,16 @@ func ToResources(spec *specs.LinuxResources) *Resources {
resources.Memory.Low = l resources.Memory.Low = l
} }
} }
if hugetlbs := spec.HugepageLimits; hugetlbs != nil {
hugeTlbUsage := HugeTlb{}
for _, hugetlb := range hugetlbs {
hugeTlbUsage = append(hugeTlbUsage, HugeTlbEntry{
HugePageSize: hugetlb.Pagesize,
Limit: hugetlb.Limit,
})
}
resources.HugeTlb = &hugeTlbUsage
}
if pids := spec.Pids; pids != nil { if pids := spec.Pids; pids != nil {
resources.Pids = &Pids{ resources.Pids = &Pids{
Max: pids.Limit, Max: pids.Limit,
@ -202,7 +215,7 @@ func ToResources(spec *specs.LinuxResources) *Resources {
if i := spec.BlockIO; i != nil { if i := spec.BlockIO; i != nil {
resources.IO = &IO{} resources.IO = &IO{}
if i.Weight != nil { if i.Weight != nil {
resources.IO.BFQ.Weight = *i.Weight resources.IO.BFQ.Weight = 1 + (*i.Weight-10)*9999/990
} }
for t, devices := range map[IOType][]specs.LinuxThrottleDevice{ for t, devices := range map[IOType][]specs.LinuxThrottleDevice{
ReadBPS: i.ThrottleReadBpsDevice, ReadBPS: i.ThrottleReadBpsDevice,
@ -377,3 +390,56 @@ func systemdUnitFromPath(path string) string {
_, unit := filepath.Split(path) _, unit := filepath.Split(path)
return unit return unit
} }
// readHugeTlbStats collects hugetlb statistics from the
// hugetlb.<pagesize>.{current,max} files in the cgroup v2 directory at
// path, producing one HugeTlbStat per page size. Errors are non-fatal:
// an unreadable directory or file simply yields fewer (possibly zero)
// entries, never an error.
func readHugeTlbStats(path string) []*stats.HugeTlbStat {
	var usage = []*stats.HugeTlbStat{}
	var keyUsage = make(map[string]*stats.HugeTlbStat)
	f, err := os.Open(path)
	if err != nil {
		return usage
	}
	files, err := f.Readdir(-1)
	f.Close()
	if err != nil {
		return usage
	}

	for _, file := range files {
		if strings.Contains(file.Name(), "hugetlb") &&
			(strings.HasSuffix(file.Name(), "max") || strings.HasSuffix(file.Name(), "current")) {
			fileName := strings.Split(file.Name(), ".")
			// Expect "hugetlb.<pagesize>.<max|current>". A name that merely
			// contains "hugetlb" and ends in "max"/"current" (e.g.
			// "hugetlbmax") would previously panic on fileName[1]/[2];
			// skip anything without all three components.
			if len(fileName) < 3 {
				continue
			}
			pageSize := fileName[1]
			hugeTlb, ok := keyUsage[pageSize]
			if !ok {
				hugeTlb = &stats.HugeTlbStat{}
			}
			hugeTlb.Pagesize = pageSize
			out, err := ioutil.ReadFile(filepath.Join(path, file.Name()))
			if err != nil {
				continue
			}
			var value uint64
			stringVal := strings.TrimSpace(string(out))
			if stringVal == "max" {
				// An unlimited "max" file reports the literal string "max";
				// represent it as the largest possible value.
				value = math.MaxUint64
			} else {
				value, err = strconv.ParseUint(stringVal, 10, 64)
				if err != nil {
					continue
				}
			}
			switch fileName[2] {
			case "max":
				hugeTlb.Max = value
			case "current":
				hugeTlb.Current = value
			}
			keyUsage[pageSize] = hugeTlb
		}
	}
	for _, entry := range keyUsage {
		usage = append(usage, entry)
	}
	return usage
}

View File

@ -1,9 +1,9 @@
![containerd banner](https://raw.githubusercontent.com/cncf/artwork/master/projects/containerd/horizontal/color/containerd-horizontal-color.png) ![containerd banner](https://raw.githubusercontent.com/cncf/artwork/master/projects/containerd/horizontal/color/containerd-horizontal-color.png)
[![GoDoc](https://godoc.org/github.com/containerd/containerd?status.svg)](https://godoc.org/github.com/containerd/containerd) [![GoDoc](https://godoc.org/github.com/containerd/containerd?status.svg)](https://godoc.org/github.com/containerd/containerd)
[![Build Status](https://travis-ci.org/containerd/containerd.svg?branch=master)](https://travis-ci.org/containerd/containerd) [![Build Status](https://github.com/containerd/containerd/workflows/CI/badge.svg)](https://github.com/containerd/containerd/actions?query=workflow%3ACI)
[![Windows Build Status](https://ci.appveyor.com/api/projects/status/github/containerd/containerd?branch=master&svg=true)](https://ci.appveyor.com/project/mlaventure/containerd-3g73f?branch=master) [![Windows Build Status](https://ci.appveyor.com/api/projects/status/github/containerd/containerd?branch=master&svg=true)](https://ci.appveyor.com/project/mlaventure/containerd-3g73f?branch=master)
![](https://github.com/containerd/containerd/workflows/Nightly/badge.svg) [![Nightlies](https://github.com/containerd/containerd/workflows/Nightly/badge.svg)](https://github.com/containerd/containerd/actions?query=workflow%3ANightly)
[![FOSSA Status](https://app.fossa.io/api/projects/git%2Bhttps%3A%2F%2Fgithub.com%2Fcontainerd%2Fcontainerd.svg?type=shield)](https://app.fossa.io/projects/git%2Bhttps%3A%2F%2Fgithub.com%2Fcontainerd%2Fcontainerd?ref=badge_shield) [![FOSSA Status](https://app.fossa.io/api/projects/git%2Bhttps%3A%2F%2Fgithub.com%2Fcontainerd%2Fcontainerd.svg?type=shield)](https://app.fossa.io/projects/git%2Bhttps%3A%2F%2Fgithub.com%2Fcontainerd%2Fcontainerd?ref=badge_shield)
[![Go Report Card](https://goreportcard.com/badge/github.com/containerd/containerd)](https://goreportcard.com/report/github.com/containerd/containerd) [![Go Report Card](https://goreportcard.com/badge/github.com/containerd/containerd)](https://goreportcard.com/report/github.com/containerd/containerd)
[![CII Best Practices](https://bestpractices.coreinfrastructure.org/projects/1271/badge)](https://bestpractices.coreinfrastructure.org/projects/1271) [![CII Best Practices](https://bestpractices.coreinfrastructure.org/projects/1271/badge)](https://bestpractices.coreinfrastructure.org/projects/1271)

View File

@ -22,12 +22,11 @@ import (
"archive/tar" "archive/tar"
"os" "os"
"strings" "strings"
"sync"
"syscall" "syscall"
"github.com/containerd/containerd/sys"
"github.com/containerd/continuity/fs" "github.com/containerd/continuity/fs"
"github.com/containerd/continuity/sysx" "github.com/containerd/continuity/sysx"
"github.com/opencontainers/runc/libcontainer/system"
"github.com/pkg/errors" "github.com/pkg/errors"
"golang.org/x/sys/unix" "golang.org/x/sys/unix"
) )
@ -84,21 +83,11 @@ func mkdir(path string, perm os.FileMode) error {
return os.Chmod(path, perm) return os.Chmod(path, perm)
} }
var (
inUserNS bool
nsOnce sync.Once
)
func setInUserNS() {
inUserNS = system.RunningInUserNS()
}
func skipFile(hdr *tar.Header) bool { func skipFile(hdr *tar.Header) bool {
switch hdr.Typeflag { switch hdr.Typeflag {
case tar.TypeBlock, tar.TypeChar: case tar.TypeBlock, tar.TypeChar:
// cannot create a device if running in user namespace // cannot create a device if running in user namespace
nsOnce.Do(setInUserNS) return sys.RunningInUserNS()
return inUserNS
default: default:
return false return false
} }
@ -125,7 +114,7 @@ func handleTarTypeBlockCharFifo(hdr *tar.Header, path string) error {
func handleLChmod(hdr *tar.Header, path string, hdrInfo os.FileInfo) error { func handleLChmod(hdr *tar.Header, path string, hdrInfo os.FileInfo) error {
if hdr.Typeflag == tar.TypeLink { if hdr.Typeflag == tar.TypeLink {
if fi, err := os.Lstat(hdr.Linkname); err == nil && (fi.Mode()&os.ModeSymlink == 0) { if fi, err := os.Lstat(hdr.Linkname); err == nil && (fi.Mode()&os.ModeSymlink == 0) {
if err := os.Chmod(path, hdrInfo.Mode()); err != nil { if err := os.Chmod(path, hdrInfo.Mode()); err != nil && !os.IsNotExist(err) {
return err return err
} }
} }

View File

@ -58,7 +58,6 @@ import (
"github.com/containerd/containerd/snapshots" "github.com/containerd/containerd/snapshots"
snproxy "github.com/containerd/containerd/snapshots/proxy" snproxy "github.com/containerd/containerd/snapshots/proxy"
"github.com/containerd/typeurl" "github.com/containerd/typeurl"
"github.com/gogo/protobuf/types"
ptypes "github.com/gogo/protobuf/types" ptypes "github.com/gogo/protobuf/types"
ocispec "github.com/opencontainers/image-spec/specs-go/v1" ocispec "github.com/opencontainers/image-spec/specs-go/v1"
specs "github.com/opencontainers/runtime-spec/specs-go" specs "github.com/opencontainers/runtime-spec/specs-go"
@ -319,6 +318,9 @@ type RemoteContext struct {
// Snapshotter used for unpacking // Snapshotter used for unpacking
Snapshotter string Snapshotter string
// SnapshotterOpts are additional options to be passed to a snapshotter during pull
SnapshotterOpts []snapshots.Opt
// Labels to be applied to the created image // Labels to be applied to the created image
Labels map[string]string Labels map[string]string
@ -720,7 +722,7 @@ func (c *Client) Server(ctx context.Context) (ServerInfo, error) {
} }
c.connMu.Unlock() c.connMu.Unlock()
response, err := c.IntrospectionService().Server(ctx, &types.Empty{}) response, err := c.IntrospectionService().Server(ctx, &ptypes.Empty{})
if err != nil { if err != nil {
return ServerInfo{}, err return ServerInfo{}, err
} }

View File

@ -22,6 +22,8 @@ import (
"github.com/containerd/containerd/images" "github.com/containerd/containerd/images"
"github.com/containerd/containerd/platforms" "github.com/containerd/containerd/platforms"
"github.com/containerd/containerd/remotes" "github.com/containerd/containerd/remotes"
"github.com/containerd/containerd/snapshots"
"google.golang.org/grpc" "google.golang.org/grpc"
) )
@ -138,10 +140,11 @@ func WithUnpackOpts(opts []UnpackOpt) RemoteOpt {
} }
} }
// WithPullSnapshotter specifies snapshotter name used for unpacking // WithPullSnapshotter specifies snapshotter name used for unpacking.
func WithPullSnapshotter(snapshotterName string) RemoteOpt { func WithPullSnapshotter(snapshotterName string, opts ...snapshots.Opt) RemoteOpt {
return func(_ *Client, c *RemoteContext) error { return func(_ *Client, c *RemoteContext) error {
c.Snapshotter = snapshotterName c.Snapshotter = snapshotterName
c.SnapshotterOpts = opts
return nil return nil
} }
} }

View File

@ -227,6 +227,10 @@ can be used and modified as necessary as a custom configuration.`
} }
serve(ctx, l, server.ServeGRPC) serve(ctx, l, server.ServeGRPC)
if err := notifyReady(ctx); err != nil {
log.G(ctx).WithError(err).Warn("notify ready failed")
}
log.G(ctx).Infof("containerd successfully booted in %fs", time.Since(start).Seconds()) log.G(ctx).Infof("containerd successfully booted in %fs", time.Since(start).Seconds())
<-done <-done
return nil return nil

View File

@ -52,6 +52,10 @@ func handleSignals(ctx context.Context, signals chan os.Signal, serverC chan *se
case unix.SIGPIPE: case unix.SIGPIPE:
continue continue
default: default:
if err := notifyStopping(ctx); err != nil {
log.G(ctx).WithError(err).Error("notify stopping failed")
}
if server == nil { if server == nil {
close(done) close(done)
return return

View File

@ -50,6 +50,11 @@ func handleSignals(ctx context.Context, signals chan os.Signal, serverC chan *se
server = s server = s
case s := <-signals: case s := <-signals:
log.G(ctx).WithField("signal", s).Debug("received signal") log.G(ctx).WithField("signal", s).Debug("received signal")
if err := notifyStopping(ctx); err != nil {
log.G(ctx).WithError(err).Error("notify stopping failed")
}
if server == nil { if server == nil {
close(done) close(done)
return return

View File

@ -0,0 +1,47 @@
// +build linux
/*
Copyright The containerd Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package command
import (
"context"
sd "github.com/coreos/go-systemd/v22/daemon"
"github.com/containerd/containerd/log"
)
// notifyReady notifies systemd that the daemon is ready to serve requests
func notifyReady(ctx context.Context) error {
return sdNotify(ctx, sd.SdNotifyReady)
}
// notifyStopping notifies systemd that the daemon is about to be stopped
func notifyStopping(ctx context.Context) error {
return sdNotify(ctx, sd.SdNotifyStopping)
}
func sdNotify(ctx context.Context, state string) error {
notified, err := sd.SdNotify(false, state)
log.G(ctx).
WithError(err).
WithField("notified", notified).
WithField("state", state).
Debug("sd notification")
return err
}

View File

@ -0,0 +1,31 @@
// +build !linux
/*
Copyright The containerd Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package command
import (
"context"
)
func notifyReady(ctx context.Context) error {
return nil
}
func notifyStopping(ctx context.Context) error {
return nil
}

View File

@ -49,8 +49,6 @@ var (
allocConsole = kernel32.NewProc("AllocConsole") allocConsole = kernel32.NewProc("AllocConsole")
oldStderr windows.Handle oldStderr windows.Handle
panicFile *os.File panicFile *os.File
service *handler
) )
const defaultServiceName = "containerd" const defaultServiceName = "containerd"
@ -282,7 +280,6 @@ func launchService(s *server.Server, done chan struct{}) error {
return err return err
} }
service = h
go func() { go func() {
if interactive { if interactive {
err = debug.Run(serviceNameFlag, h) err = debug.Run(serviceNameFlag, h)

View File

@ -32,6 +32,7 @@ import (
"github.com/containerd/containerd/images" "github.com/containerd/containerd/images"
"github.com/containerd/containerd/oci" "github.com/containerd/containerd/oci"
"github.com/containerd/containerd/runtime/v2/runc/options" "github.com/containerd/containerd/runtime/v2/runc/options"
"github.com/containerd/containerd/sys"
"github.com/containerd/typeurl" "github.com/containerd/typeurl"
prototypes "github.com/gogo/protobuf/types" prototypes "github.com/gogo/protobuf/types"
ver "github.com/opencontainers/image-spec/specs-go" ver "github.com/opencontainers/image-spec/specs-go"
@ -422,14 +423,33 @@ func attachExistingIO(response *tasks.GetResponse, ioAttach cio.Attach) (cio.IO,
// loadFifos loads the containers fifos // loadFifos loads the containers fifos
func loadFifos(response *tasks.GetResponse) *cio.FIFOSet { func loadFifos(response *tasks.GetResponse) *cio.FIFOSet {
path := getFifoDir([]string{ fifos := []string{
response.Process.Stdin, response.Process.Stdin,
response.Process.Stdout, response.Process.Stdout,
response.Process.Stderr, response.Process.Stderr,
})
closer := func() error {
return os.RemoveAll(path)
} }
closer := func() error {
var (
err error
dirs = map[string]struct{}{}
)
for _, fifo := range fifos {
if isFifo, _ := sys.IsFifo(fifo); isFifo {
if rerr := os.Remove(fifo); err == nil {
err = rerr
}
dirs[filepath.Dir(fifo)] = struct{}{}
}
}
for dir := range dirs {
// we ignore errors here because we don't
// want to remove the directory if it isn't
// empty
os.Remove(dir)
}
return err
}
return cio.NewFIFOSet(cio.Config{ return cio.NewFIFOSet(cio.Config{
Stdin: response.Process.Stdin, Stdin: response.Process.Stdin,
Stdout: response.Process.Stdout, Stdout: response.Process.Stdout,
@ -437,14 +457,3 @@ func loadFifos(response *tasks.GetResponse) *cio.FIFOSet {
Terminal: response.Process.Terminal, Terminal: response.Process.Terminal,
}, closer) }, closer)
} }
// getFifoDir looks for any non-empty path for a stdio fifo
// and returns the dir for where it is located
func getFifoDir(paths []string) string {
for _, p := range paths {
if p != "" {
return filepath.Dir(p)
}
}
return ""
}

View File

@ -92,7 +92,11 @@ func NewLabeledStore(root string, ls LabelStore) (content.Store, error) {
} }
func (s *store) Info(ctx context.Context, dgst digest.Digest) (content.Info, error) { func (s *store) Info(ctx context.Context, dgst digest.Digest) (content.Info, error) {
p := s.blobPath(dgst) p, err := s.blobPath(dgst)
if err != nil {
return content.Info{}, errors.Wrapf(err, "calculating blob info path")
}
fi, err := os.Stat(p) fi, err := os.Stat(p)
if err != nil { if err != nil {
if os.IsNotExist(err) { if os.IsNotExist(err) {
@ -123,7 +127,10 @@ func (s *store) info(dgst digest.Digest, fi os.FileInfo, labels map[string]strin
// ReaderAt returns an io.ReaderAt for the blob. // ReaderAt returns an io.ReaderAt for the blob.
func (s *store) ReaderAt(ctx context.Context, desc ocispec.Descriptor) (content.ReaderAt, error) { func (s *store) ReaderAt(ctx context.Context, desc ocispec.Descriptor) (content.ReaderAt, error) {
p := s.blobPath(desc.Digest) p, err := s.blobPath(desc.Digest)
if err != nil {
return nil, errors.Wrapf(err, "calculating blob path for ReaderAt")
}
fi, err := os.Stat(p) fi, err := os.Stat(p)
if err != nil { if err != nil {
if !os.IsNotExist(err) { if !os.IsNotExist(err) {
@ -150,7 +157,12 @@ func (s *store) ReaderAt(ctx context.Context, desc ocispec.Descriptor) (content.
// While this is safe to do concurrently, safe exist-removal logic must hold // While this is safe to do concurrently, safe exist-removal logic must hold
// some global lock on the store. // some global lock on the store.
func (s *store) Delete(ctx context.Context, dgst digest.Digest) error { func (s *store) Delete(ctx context.Context, dgst digest.Digest) error {
if err := os.RemoveAll(s.blobPath(dgst)); err != nil { bp, err := s.blobPath(dgst)
if err != nil {
return errors.Wrapf(err, "calculating blob path for delete")
}
if err := os.RemoveAll(bp); err != nil {
if !os.IsNotExist(err) { if !os.IsNotExist(err) {
return err return err
} }
@ -166,7 +178,11 @@ func (s *store) Update(ctx context.Context, info content.Info, fieldpaths ...str
return content.Info{}, errors.Wrapf(errdefs.ErrFailedPrecondition, "update not supported on immutable content store") return content.Info{}, errors.Wrapf(errdefs.ErrFailedPrecondition, "update not supported on immutable content store")
} }
p := s.blobPath(info.Digest) p, err := s.blobPath(info.Digest)
if err != nil {
return content.Info{}, errors.Wrapf(err, "calculating blob path for update")
}
fi, err := os.Stat(p) fi, err := os.Stat(p)
if err != nil { if err != nil {
if os.IsNotExist(err) { if os.IsNotExist(err) {
@ -512,7 +528,10 @@ func (s *store) writer(ctx context.Context, ref string, total int64, expected di
// TODO(stevvooe): Need to actually store expected here. We have // TODO(stevvooe): Need to actually store expected here. We have
// code in the service that shouldn't be dealing with this. // code in the service that shouldn't be dealing with this.
if expected != "" { if expected != "" {
p := s.blobPath(expected) p, err := s.blobPath(expected)
if err != nil {
return nil, errors.Wrap(err, "calculating expected blob path for writer")
}
if _, err := os.Stat(p); err == nil { if _, err := os.Stat(p); err == nil {
return nil, errors.Wrapf(errdefs.ErrAlreadyExists, "content %v", expected) return nil, errors.Wrapf(errdefs.ErrAlreadyExists, "content %v", expected)
} }
@ -607,11 +626,17 @@ func (s *store) Abort(ctx context.Context, ref string) error {
return nil return nil
} }
func (s *store) blobPath(dgst digest.Digest) string { func (s *store) blobPath(dgst digest.Digest) (string, error) {
return filepath.Join(s.root, "blobs", dgst.Algorithm().String(), dgst.Hex()) if err := dgst.Validate(); err != nil {
return "", errors.Wrapf(errdefs.ErrInvalidArgument, "cannot calculate blob path from invalid digest: %v", err)
}
return filepath.Join(s.root, "blobs", dgst.Algorithm().String(), dgst.Hex()), nil
} }
func (s *store) ingestRoot(ref string) string { func (s *store) ingestRoot(ref string) string {
// we take a digest of the ref to keep the ingest paths constant length.
// Note that this is not the current or potential digest of incoming content.
dgst := digest.FromString(ref) dgst := digest.FromString(ref)
return filepath.Join(s.root, "ingest", dgst.Hex()) return filepath.Join(s.root, "ingest", dgst.Hex())
} }

View File

@ -115,8 +115,8 @@ func (w *writer) Commit(ctx context.Context, size int64, expected digest.Digest,
} }
var ( var (
ingest = filepath.Join(w.path, "data") ingest = filepath.Join(w.path, "data")
target = w.s.blobPath(dgst) target, _ = w.s.blobPath(dgst) // ignore error because we calculated this dgst
) )
// make sure parent directories of blob exist // make sure parent directories of blob exist

View File

@ -56,7 +56,6 @@ func DefaultProfile(sp *specs.Spec) *specs.LinuxSeccomp {
"accept4", "accept4",
"access", "access",
"alarm", "alarm",
"alarm",
"bind", "bind",
"brk", "brk",
"capget", "capget",
@ -66,8 +65,11 @@ func DefaultProfile(sp *specs.Spec) *specs.LinuxSeccomp {
"chown", "chown",
"chown32", "chown32",
"clock_getres", "clock_getres",
"clock_getres_time64",
"clock_gettime", "clock_gettime",
"clock_gettime64",
"clock_nanosleep", "clock_nanosleep",
"clock_nanosleep_time64",
"close", "close",
"connect", "connect",
"copy_file_range", "copy_file_range",
@ -117,6 +119,7 @@ func DefaultProfile(sp *specs.Spec) *specs.LinuxSeccomp {
"ftruncate", "ftruncate",
"ftruncate64", "ftruncate64",
"futex", "futex",
"futex_time64",
"futimesat", "futimesat",
"getcpu", "getcpu",
"getcwd", "getcwd",
@ -163,6 +166,7 @@ func DefaultProfile(sp *specs.Spec) *specs.LinuxSeccomp {
"io_destroy", "io_destroy",
"io_getevents", "io_getevents",
"io_pgetevents", "io_pgetevents",
"io_pgetevents_time64",
"ioprio_get", "ioprio_get",
"ioprio_set", "ioprio_set",
"io_setup", "io_setup",
@ -200,7 +204,9 @@ func DefaultProfile(sp *specs.Spec) *specs.LinuxSeccomp {
"mq_notify", "mq_notify",
"mq_open", "mq_open",
"mq_timedreceive", "mq_timedreceive",
"mq_timedreceive_time64",
"mq_timedsend", "mq_timedsend",
"mq_timedsend_time64",
"mq_unlink", "mq_unlink",
"mremap", "mremap",
"msgctl", "msgctl",
@ -221,11 +227,13 @@ func DefaultProfile(sp *specs.Spec) *specs.LinuxSeccomp {
"pipe2", "pipe2",
"poll", "poll",
"ppoll", "ppoll",
"ppoll_time64",
"prctl", "prctl",
"pread64", "pread64",
"preadv", "preadv",
"prlimit64", "prlimit64",
"pselect6", "pselect6",
"pselect6_time64",
"pwrite64", "pwrite64",
"pwritev", "pwritev",
"read", "read",
@ -236,6 +244,7 @@ func DefaultProfile(sp *specs.Spec) *specs.LinuxSeccomp {
"recv", "recv",
"recvfrom", "recvfrom",
"recvmmsg", "recvmmsg",
"recvmmsg_time64",
"recvmsg", "recvmsg",
"remap_file_pages", "remap_file_pages",
"removexattr", "removexattr",
@ -251,6 +260,7 @@ func DefaultProfile(sp *specs.Spec) *specs.LinuxSeccomp {
"rt_sigreturn", "rt_sigreturn",
"rt_sigsuspend", "rt_sigsuspend",
"rt_sigtimedwait", "rt_sigtimedwait",
"rt_sigtimedwait_time64",
"rt_tgsigqueueinfo", "rt_tgsigqueueinfo",
"sched_getaffinity", "sched_getaffinity",
"sched_getattr", "sched_getattr",
@ -259,6 +269,7 @@ func DefaultProfile(sp *specs.Spec) *specs.LinuxSeccomp {
"sched_get_priority_min", "sched_get_priority_min",
"sched_getscheduler", "sched_getscheduler",
"sched_rr_get_interval", "sched_rr_get_interval",
"sched_rr_get_interval_time64",
"sched_setaffinity", "sched_setaffinity",
"sched_setattr", "sched_setattr",
"sched_setparam", "sched_setparam",
@ -270,6 +281,7 @@ func DefaultProfile(sp *specs.Spec) *specs.LinuxSeccomp {
"semget", "semget",
"semop", "semop",
"semtimedop", "semtimedop",
"semtimedop_time64",
"send", "send",
"sendfile", "sendfile",
"sendfile64", "sendfile64",
@ -335,12 +347,16 @@ func DefaultProfile(sp *specs.Spec) *specs.LinuxSeccomp {
"time", "time",
"timer_create", "timer_create",
"timer_delete", "timer_delete",
"timerfd_create",
"timerfd_gettime",
"timerfd_settime",
"timer_getoverrun", "timer_getoverrun",
"timer_gettime", "timer_gettime",
"timer_gettime64",
"timer_settime", "timer_settime",
"timer_settime64",
"timerfd_create",
"timerfd_gettime",
"timerfd_gettime64",
"timerfd_settime",
"timerfd_settime64",
"times", "times",
"tkill", "tkill",
"truncate", "truncate",
@ -352,6 +368,7 @@ func DefaultProfile(sp *specs.Spec) *specs.LinuxSeccomp {
"unlinkat", "unlinkat",
"utime", "utime",
"utimensat", "utimensat",
"utimensat_time64",
"utimes", "utimes",
"vfork", "vfork",
"vmsplice", "vmsplice",

View File

@ -26,7 +26,7 @@ import (
"github.com/containerd/containerd/archive" "github.com/containerd/containerd/archive"
"github.com/containerd/containerd/errdefs" "github.com/containerd/containerd/errdefs"
"github.com/containerd/containerd/mount" "github.com/containerd/containerd/mount"
"github.com/opencontainers/runc/libcontainer/system" "github.com/containerd/containerd/sys"
"github.com/pkg/errors" "github.com/pkg/errors"
) )
@ -35,7 +35,7 @@ func apply(ctx context.Context, mounts []mount.Mount, r io.Reader) error {
case len(mounts) == 1 && mounts[0].Type == "overlay": case len(mounts) == 1 && mounts[0].Type == "overlay":
// OverlayConvertWhiteout (mknod c 0 0) doesn't work in userns. // OverlayConvertWhiteout (mknod c 0 0) doesn't work in userns.
// https://github.com/containerd/containerd/issues/3762 // https://github.com/containerd/containerd/issues/3762
if system.RunningInUserNS() { if sys.RunningInUserNS() {
break break
} }
path, parents, err := getOverlayPath(mounts[0].Options) path, parents, err := getOverlayPath(mounts[0].Options)

View File

@ -25,11 +25,11 @@ import (
// WithLease attaches a lease on the context // WithLease attaches a lease on the context
func (c *Client) WithLease(ctx context.Context, opts ...leases.Opt) (context.Context, func(context.Context) error, error) { func (c *Client) WithLease(ctx context.Context, opts ...leases.Opt) (context.Context, func(context.Context) error, error) {
nop := func(context.Context) error { return nil }
_, ok := leases.FromContext(ctx) _, ok := leases.FromContext(ctx)
if ok { if ok {
return ctx, func(context.Context) error { return ctx, nop, nil
return nil
}, nil
} }
ls := c.LeasesService() ls := c.LeasesService()
@ -44,7 +44,7 @@ func (c *Client) WithLease(ctx context.Context, opts ...leases.Opt) (context.Con
l, err := ls.Create(ctx, opts...) l, err := ls.Create(ctx, opts...)
if err != nil { if err != nil {
return nil, nil, err return ctx, nop, err
} }
ctx = leases.WithLease(ctx, l.ID) ctx = leases.WithLease(ctx, l.ID)

View File

@ -35,7 +35,7 @@ import (
// NewTaskMonitor returns a new cgroups monitor // NewTaskMonitor returns a new cgroups monitor
func NewTaskMonitor(ctx context.Context, publisher events.Publisher, ns *metrics.Namespace) (runtime.TaskMonitor, error) { func NewTaskMonitor(ctx context.Context, publisher events.Publisher, ns *metrics.Namespace) (runtime.TaskMonitor, error) {
collector := newCollector(ns) collector := NewCollector(ns)
oom, err := newOOMCollector(ns) oom, err := newOOMCollector(ns)
if err != nil { if err != nil {
return nil, err return nil, err
@ -49,7 +49,7 @@ func NewTaskMonitor(ctx context.Context, publisher events.Publisher, ns *metrics
} }
type cgroupsMonitor struct { type cgroupsMonitor struct {
collector *collector collector *Collector
oom *oomCollector oom *oomCollector
context context.Context context context.Context
publisher events.Publisher publisher events.Publisher

View File

@ -24,6 +24,9 @@ import (
"github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus"
) )
// IDName is the name that is used to identify the id being collected in the metric
var IDName = "container_id"
type value struct { type value struct {
v float64 v float64
l []string l []string
@ -41,7 +44,7 @@ type metric struct {
func (m *metric) desc(ns *metrics.Namespace) *prometheus.Desc { func (m *metric) desc(ns *metrics.Namespace) *prometheus.Desc {
// the namespace label is for containerd namespaces // the namespace label is for containerd namespaces
return ns.NewDesc(m.name, m.help, m.unit, append([]string{"container_id", "namespace"}, m.labels...)...) return ns.NewDesc(m.name, m.help, m.unit, append([]string{IDName, "namespace"}, m.labels...)...)
} }
func (m *metric) collect(id, namespace string, stats *v1.Metrics, ns *metrics.Namespace, ch chan<- prometheus.Metric, block bool) { func (m *metric) collect(id, namespace string, stats *v1.Metrics, ns *metrics.Namespace, ch chan<- prometheus.Metric, block bool) {

View File

@ -27,26 +27,33 @@ import (
"github.com/containerd/containerd/log" "github.com/containerd/containerd/log"
v1 "github.com/containerd/containerd/metrics/types/v1" v1 "github.com/containerd/containerd/metrics/types/v1"
"github.com/containerd/containerd/namespaces" "github.com/containerd/containerd/namespaces"
"github.com/containerd/containerd/runtime"
"github.com/containerd/typeurl" "github.com/containerd/typeurl"
metrics "github.com/docker/go-metrics" metrics "github.com/docker/go-metrics"
"github.com/gogo/protobuf/types"
"github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus"
) )
// Statable type that returns cgroup metrics
type Statable interface {
ID() string
Namespace() string
Stats(context.Context) (*types.Any, error)
}
// Trigger will be called when an event happens and provides the cgroup // Trigger will be called when an event happens and provides the cgroup
// where the event originated from // where the event originated from
type Trigger func(string, string, cgroups.Cgroup) type Trigger func(string, string, cgroups.Cgroup)
// newCollector registers the collector with the provided namespace and returns it so // NewCollector registers the collector with the provided namespace and returns it so
// that cgroups can be added for collection // that cgroups can be added for collection
func newCollector(ns *metrics.Namespace) *collector { func NewCollector(ns *metrics.Namespace) *Collector {
if ns == nil { if ns == nil {
return &collector{} return &Collector{}
} }
// add machine cpus and memory info // add machine cpus and memory info
c := &collector{ c := &Collector{
ns: ns, ns: ns,
tasks: make(map[string]runtime.Task), tasks: make(map[string]Statable),
} }
c.metrics = append(c.metrics, pidMetrics...) c.metrics = append(c.metrics, pidMetrics...)
c.metrics = append(c.metrics, cpuMetrics...) c.metrics = append(c.metrics, cpuMetrics...)
@ -62,24 +69,26 @@ func taskID(id, namespace string) string {
return fmt.Sprintf("%s-%s", id, namespace) return fmt.Sprintf("%s-%s", id, namespace)
} }
// collector provides the ability to collect container stats and export // Collector provides the ability to collect container stats and export
// them in the prometheus format // them in the prometheus format
type collector struct { type Collector struct {
mu sync.RWMutex mu sync.RWMutex
tasks map[string]runtime.Task tasks map[string]Statable
ns *metrics.Namespace ns *metrics.Namespace
metrics []*metric metrics []*metric
storedMetrics chan prometheus.Metric storedMetrics chan prometheus.Metric
} }
func (c *collector) Describe(ch chan<- *prometheus.Desc) { // Describe prometheus metrics
func (c *Collector) Describe(ch chan<- *prometheus.Desc) {
for _, m := range c.metrics { for _, m := range c.metrics {
ch <- m.desc(c.ns) ch <- m.desc(c.ns)
} }
} }
func (c *collector) Collect(ch chan<- prometheus.Metric) { // Collect prometheus metrics
func (c *Collector) Collect(ch chan<- prometheus.Metric) {
c.mu.RLock() c.mu.RLock()
wg := &sync.WaitGroup{} wg := &sync.WaitGroup{}
for _, t := range c.tasks { for _, t := range c.tasks {
@ -100,7 +109,7 @@ storedLoop:
wg.Wait() wg.Wait()
} }
func (c *collector) collect(t runtime.Task, ch chan<- prometheus.Metric, block bool, wg *sync.WaitGroup) { func (c *Collector) collect(t Statable, ch chan<- prometheus.Metric, block bool, wg *sync.WaitGroup) {
if wg != nil { if wg != nil {
defer wg.Done() defer wg.Done()
} }
@ -126,7 +135,7 @@ func (c *collector) collect(t runtime.Task, ch chan<- prometheus.Metric, block b
} }
// Add adds the provided cgroup and id so that metrics are collected and exported // Add adds the provided cgroup and id so that metrics are collected and exported
func (c *collector) Add(t runtime.Task) error { func (c *Collector) Add(t Statable) error {
if c.ns == nil { if c.ns == nil {
return nil return nil
} }
@ -141,11 +150,21 @@ func (c *collector) Add(t runtime.Task) error {
} }
// Remove removes the provided cgroup by id from the collector // Remove removes the provided cgroup by id from the collector
func (c *collector) Remove(t runtime.Task) { func (c *Collector) Remove(t Statable) {
if c.ns == nil { if c.ns == nil {
return return
} }
c.mu.Lock() c.mu.Lock()
defer c.mu.Unlock()
delete(c.tasks, taskID(t.ID(), t.Namespace())) delete(c.tasks, taskID(t.ID(), t.Namespace()))
c.mu.Unlock()
}
// RemoveAll statable items from the collector
func (c *Collector) RemoveAll() {
if c.ns == nil {
return
}
c.mu.Lock()
c.tasks = make(map[string]Statable)
c.mu.Unlock()
} }

View File

@ -30,7 +30,7 @@ import (
// NewTaskMonitor returns a new cgroups monitor // NewTaskMonitor returns a new cgroups monitor
func NewTaskMonitor(ctx context.Context, publisher events.Publisher, ns *metrics.Namespace) (runtime.TaskMonitor, error) { func NewTaskMonitor(ctx context.Context, publisher events.Publisher, ns *metrics.Namespace) (runtime.TaskMonitor, error) {
collector := newCollector(ns) collector := NewCollector(ns)
return &cgroupsMonitor{ return &cgroupsMonitor{
collector: collector, collector: collector,
context: ctx, context: ctx,
@ -39,7 +39,7 @@ func NewTaskMonitor(ctx context.Context, publisher events.Publisher, ns *metrics
} }
type cgroupsMonitor struct { type cgroupsMonitor struct {
collector *collector collector *Collector
context context.Context context context.Context
publisher events.Publisher publisher events.Publisher
} }

View File

@ -24,6 +24,9 @@ import (
"github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus"
) )
// IDName is the name that is used to identify the id being collected in the metric
var IDName = "container_id"
type value struct { type value struct {
v float64 v float64
l []string l []string
@ -41,7 +44,7 @@ type metric struct {
func (m *metric) desc(ns *metrics.Namespace) *prometheus.Desc { func (m *metric) desc(ns *metrics.Namespace) *prometheus.Desc {
// the namespace label is for containerd namespaces // the namespace label is for containerd namespaces
return ns.NewDesc(m.name, m.help, m.unit, append([]string{"container_id", "namespace"}, m.labels...)...) return ns.NewDesc(m.name, m.help, m.unit, append([]string{IDName, "namespace"}, m.labels...)...)
} }
func (m *metric) collect(id, namespace string, stats *v2.Metrics, ns *metrics.Namespace, ch chan<- prometheus.Metric, block bool) { func (m *metric) collect(id, namespace string, stats *v2.Metrics, ns *metrics.Namespace, ch chan<- prometheus.Metric, block bool) {

View File

@ -26,21 +26,28 @@ import (
"github.com/containerd/containerd/log" "github.com/containerd/containerd/log"
v2 "github.com/containerd/containerd/metrics/types/v2" v2 "github.com/containerd/containerd/metrics/types/v2"
"github.com/containerd/containerd/namespaces" "github.com/containerd/containerd/namespaces"
"github.com/containerd/containerd/runtime"
"github.com/containerd/typeurl" "github.com/containerd/typeurl"
metrics "github.com/docker/go-metrics" metrics "github.com/docker/go-metrics"
"github.com/gogo/protobuf/types"
"github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus"
) )
// newCollector registers the collector with the provided namespace and returns it so // Statable type that returns cgroup metrics
type Statable interface {
ID() string
Namespace() string
Stats(context.Context) (*types.Any, error)
}
// NewCollector registers the collector with the provided namespace and returns it so
// that cgroups can be added for collection // that cgroups can be added for collection
func newCollector(ns *metrics.Namespace) *collector { func NewCollector(ns *metrics.Namespace) *Collector {
if ns == nil { if ns == nil {
return &collector{} return &Collector{}
} }
c := &collector{ c := &Collector{
ns: ns, ns: ns,
tasks: make(map[string]runtime.Task), tasks: make(map[string]Statable),
} }
c.metrics = append(c.metrics, pidMetrics...) c.metrics = append(c.metrics, pidMetrics...)
c.metrics = append(c.metrics, cpuMetrics...) c.metrics = append(c.metrics, cpuMetrics...)
@ -55,24 +62,26 @@ func taskID(id, namespace string) string {
return fmt.Sprintf("%s-%s", id, namespace) return fmt.Sprintf("%s-%s", id, namespace)
} }
// collector provides the ability to collect container stats and export // Collector provides the ability to collect container stats and export
// them in the prometheus format // them in the prometheus format
type collector struct { type Collector struct {
mu sync.RWMutex mu sync.RWMutex
tasks map[string]runtime.Task tasks map[string]Statable
ns *metrics.Namespace ns *metrics.Namespace
metrics []*metric metrics []*metric
storedMetrics chan prometheus.Metric storedMetrics chan prometheus.Metric
} }
func (c *collector) Describe(ch chan<- *prometheus.Desc) { // Describe prometheus metrics
func (c *Collector) Describe(ch chan<- *prometheus.Desc) {
for _, m := range c.metrics { for _, m := range c.metrics {
ch <- m.desc(c.ns) ch <- m.desc(c.ns)
} }
} }
func (c *collector) Collect(ch chan<- prometheus.Metric) { // Collect prometheus metrics
func (c *Collector) Collect(ch chan<- prometheus.Metric) {
c.mu.RLock() c.mu.RLock()
wg := &sync.WaitGroup{} wg := &sync.WaitGroup{}
for _, t := range c.tasks { for _, t := range c.tasks {
@ -93,7 +102,7 @@ storedLoop:
wg.Wait() wg.Wait()
} }
func (c *collector) collect(t runtime.Task, ch chan<- prometheus.Metric, block bool, wg *sync.WaitGroup) { func (c *Collector) collect(t Statable, ch chan<- prometheus.Metric, block bool, wg *sync.WaitGroup) {
if wg != nil { if wg != nil {
defer wg.Done() defer wg.Done()
} }
@ -119,7 +128,7 @@ func (c *collector) collect(t runtime.Task, ch chan<- prometheus.Metric, block b
} }
// Add adds the provided cgroup and id so that metrics are collected and exported // Add adds the provided cgroup and id so that metrics are collected and exported
func (c *collector) Add(t runtime.Task) error { func (c *Collector) Add(t Statable) error {
if c.ns == nil { if c.ns == nil {
return nil return nil
} }
@ -134,7 +143,7 @@ func (c *collector) Add(t runtime.Task) error {
} }
// Remove removes the provided cgroup by id from the collector // Remove removes the provided cgroup by id from the collector
func (c *collector) Remove(t runtime.Task) { func (c *Collector) Remove(t Statable) {
if c.ns == nil { if c.ns == nil {
return return
} }
@ -142,3 +151,13 @@ func (c *collector) Remove(t runtime.Task) {
defer c.mu.Unlock() defer c.mu.Unlock()
delete(c.tasks, taskID(t.ID(), t.Namespace())) delete(c.tasks, taskID(t.ID(), t.Namespace()))
} }
// RemoveAll statable items from the collector
func (c *Collector) RemoveAll() {
if c.ns == nil {
return
}
c.mu.Lock()
c.tasks = make(map[string]Statable)
c.mu.Unlock()
}

View File

@ -45,10 +45,6 @@ func parseInfoFile(r io.Reader) ([]Info, error) {
out := []Info{} out := []Info{}
var err error var err error
for s.Scan() { for s.Scan() {
if err = s.Err(); err != nil {
return nil, err
}
/* /*
See http://man7.org/linux/man-pages/man5/proc.5.html See http://man7.org/linux/man-pages/man5/proc.5.html
@ -128,6 +124,10 @@ func parseInfoFile(r io.Reader) ([]Info, error) {
out = append(out, p) out = append(out, p)
} }
if err = s.Err(); err != nil {
return nil, err
}
return out, nil return out, nil
} }

View File

@ -1238,11 +1238,11 @@ func WithEnvFile(path string) SpecOpts {
sc := bufio.NewScanner(f) sc := bufio.NewScanner(f)
for sc.Scan() { for sc.Scan() {
if sc.Err() != nil {
return sc.Err()
}
vars = append(vars, sc.Text()) vars = append(vars, sc.Text())
} }
if err = sc.Err(); err != nil {
return err
}
return WithEnv(vars)(nil, nil, nil, s) return WithEnv(vars)(nil, nil, nil, s)
} }
} }

Some files were not shown because too many files have changed in this diff Show More