go.mod: Bump hcsshim to v0.10.0-rc.1
This pulls in quite a bit (it also bumps google/uuid to v1.3.0): HostProcess container improvements to get ready for the feature going stable in Kubernetes, Hyper-V (Windows) container support for CRI, and a plethora of other small additions and fixes.

Signed-off-by: Daniel Canter <dcanter@microsoft.com>
Parent: a04268132e
Commit: 1f8db2467b
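The dependency bump itself only touches go.mod, go.sum, and the vendored tree. The exact commands are not recorded in the commit; the snippet below is just a sketch of the standard Go module workflow that produces this kind of change (run at the repository root, and again in integration/client, which has its own go.mod):

```sh
# Sketch of the usual bump workflow (assumed, not taken from this commit).
go get github.com/Microsoft/hcsshim@v0.10.0-rc.1
go get github.com/google/uuid@v1.3.0
go mod tidy      # rewrites go.sum
go mod vendor    # refreshes vendor/github.com/Microsoft/hcsshim/...
```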
go.mod (4 changes)
@@ -5,7 +5,7 @@ go 1.18
 require (
 	github.com/AdaLogics/go-fuzz-headers v0.0.0-20220706123152-fef3fe1bab07
 	github.com/Microsoft/go-winio v0.5.2
-	github.com/Microsoft/hcsshim v0.9.4
+	github.com/Microsoft/hcsshim v0.10.0-rc.1
 	github.com/container-orchestrated-devices/container-device-interface v0.3.1
 	github.com/containerd/aufs v1.0.0
 	github.com/containerd/btrfs v1.0.0
@@ -30,7 +30,7 @@ require (
 	github.com/emicklei/go-restful/v3 v3.8.0
 	github.com/fsnotify/fsnotify v1.5.1
 	github.com/google/go-cmp v0.5.8
-	github.com/google/uuid v1.2.0
+	github.com/google/uuid v1.3.0
 	github.com/grpc-ecosystem/go-grpc-middleware v1.3.0
 	github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0
 	github.com/hashicorp/go-multierror v1.1.1
go.sum (7 changes)
@@ -84,8 +84,8 @@ github.com/Microsoft/hcsshim v0.8.20/go.mod h1:+w2gRZ5ReXQhFOrvSQeNfhrYB/dg3oDwT
 github.com/Microsoft/hcsshim v0.8.21/go.mod h1:+w2gRZ5ReXQhFOrvSQeNfhrYB/dg3oDwTOcER2fw4I4=
 github.com/Microsoft/hcsshim v0.8.23/go.mod h1:4zegtUJth7lAvFyc6cH2gGQ5B3OFQim01nnU2M8jKDg=
 github.com/Microsoft/hcsshim v0.9.2/go.mod h1:7pLA8lDk46WKDWlVsENo92gC0XFa8rbKfyFRBqxEbCc=
-github.com/Microsoft/hcsshim v0.9.4 h1:mnUj0ivWy6UzbB1uLFqKR6F+ZyiDc7j4iGgHTpO+5+I=
-github.com/Microsoft/hcsshim v0.9.4/go.mod h1:7pLA8lDk46WKDWlVsENo92gC0XFa8rbKfyFRBqxEbCc=
+github.com/Microsoft/hcsshim v0.10.0-rc.1 h1:Lms8jwpaIdIUvoBNee8ZuvIi1XnNy9uvnxSC9L1q1x4=
+github.com/Microsoft/hcsshim v0.10.0-rc.1/go.mod h1:7XX96hdvnwWGdXnksDNdhfFcUH1BtQY6bL2L3f9Abyk=
 github.com/Microsoft/hcsshim/test v0.0.0-20201218223536-d3e5debf77da/go.mod h1:5hlzMzRKMLyo42nCZ9oml8AdTlq/0cvIaBv6tK1RehU=
 github.com/Microsoft/hcsshim/test v0.0.0-20210227013316-43a75bb4edd3/go.mod h1:mw7qgWloBUl75W/gVH3cQszUg1+gUITj7D6NY7ywVnY=
 github.com/NYTimes/gziphandler v0.0.0-20170623195520-56545f4a5d46/go.mod h1:3wb06e3pkSAbeQ52E9H9iFoQsEEwGN64994WTCIhntQ=
@@ -532,8 +532,9 @@ github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm4
 github.com/google/uuid v1.0.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
 github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
 github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
-github.com/google/uuid v1.2.0 h1:qJYtXnJRWmpe7m/3XlyhrsLrEURqHRM2kxzoxXqyUDs=
 github.com/google/uuid v1.2.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
+github.com/google/uuid v1.3.0 h1:t6JiXgmwXMjEs8VusXIJk2BXHsn+wx8BZdTaoZ5fu7I=
+github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
 github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg=
 github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk=
 github.com/googleapis/gnostic v0.4.1/go.mod h1:LRhVm6pbyptWbWbuZ38d1eyptfvIytN3ir6b65WBswg=
integration/client/go.mod

@@ -3,7 +3,7 @@ module github.com/containerd/containerd/integration/client
 go 1.18
 
 require (
-	github.com/Microsoft/hcsshim v0.9.4
+	github.com/Microsoft/hcsshim v0.10.0-rc.1
 	github.com/Microsoft/hcsshim/test v0.0.0-20210408205431-da33ecd607e1
 	github.com/containerd/cgroups v1.0.4
 	github.com/containerd/containerd v1.6.2 // see replace; the actual version of containerd is replaced with the code at the root of this repository
@@ -38,7 +38,7 @@ require (
 	github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect
 	github.com/golang/protobuf v1.5.2 // indirect
 	github.com/google/go-cmp v0.5.8 // indirect
-	github.com/google/uuid v1.2.0 // indirect
+	github.com/google/uuid v1.3.0 // indirect
 	github.com/hashicorp/errwrap v1.1.0 // indirect
 	github.com/hashicorp/go-multierror v1.1.1 // indirect
 	github.com/imdario/mergo v0.3.12 // indirect
integration/client/go.sum

@@ -60,8 +60,8 @@ github.com/Microsoft/go-winio v0.5.2/go.mod h1:WpS1mjBmmwHBEWmogvA2mj8546UReBk4v
 github.com/Microsoft/hcsshim v0.8.15/go.mod h1:x38A4YbHbdxJtc0sF6oIz+RG0npwSCAvn69iY6URG00=
 github.com/Microsoft/hcsshim v0.8.20/go.mod h1:+w2gRZ5ReXQhFOrvSQeNfhrYB/dg3oDwTOcER2fw4I4=
 github.com/Microsoft/hcsshim v0.9.2/go.mod h1:7pLA8lDk46WKDWlVsENo92gC0XFa8rbKfyFRBqxEbCc=
-github.com/Microsoft/hcsshim v0.9.4 h1:mnUj0ivWy6UzbB1uLFqKR6F+ZyiDc7j4iGgHTpO+5+I=
-github.com/Microsoft/hcsshim v0.9.4/go.mod h1:7pLA8lDk46WKDWlVsENo92gC0XFa8rbKfyFRBqxEbCc=
+github.com/Microsoft/hcsshim v0.10.0-rc.1 h1:Lms8jwpaIdIUvoBNee8ZuvIi1XnNy9uvnxSC9L1q1x4=
+github.com/Microsoft/hcsshim v0.10.0-rc.1/go.mod h1:7XX96hdvnwWGdXnksDNdhfFcUH1BtQY6bL2L3f9Abyk=
 github.com/Microsoft/hcsshim/test v0.0.0-20210408205431-da33ecd607e1 h1:pVKfKyPkXna29XlGjxSr9J0A7vNucOUHZ/2ClcTWalw=
 github.com/Microsoft/hcsshim/test v0.0.0-20210408205431-da33ecd607e1/go.mod h1:Cmvnhlie15Ha2UYrJs9EhgSx76Bq9RV2FgfEiT78GhI=
 github.com/NYTimes/gziphandler v0.0.0-20170623195520-56545f4a5d46/go.mod h1:3wb06e3pkSAbeQ52E9H9iFoQsEEwGN64994WTCIhntQ=
@@ -134,6 +134,7 @@ github.com/containerd/aufs v1.0.0/go.mod h1:kL5kd6KM5TzQjR79jljyi4olc1Vrx6XBlcyj
 github.com/containerd/btrfs v1.0.0/go.mod h1:zMcX3qkXTAi9GI50+0HOeuV8LU2ryCE/V2vG/ZBiTss=
 github.com/containerd/cgroups v0.0.0-20200824123100-0b889c03f102/go.mod h1:s5q4SojHctfxANBDvMeIaIovkq29IP48TKAxnhYRxvo=
 github.com/containerd/cgroups v1.0.1/go.mod h1:0SJrPIenamHDcZhEcJMNBB85rHcUsw4f25ZfBiPYRkU=
+github.com/containerd/cgroups v1.0.3/go.mod h1:/ofk34relqNjSGyqPrmEULrO4Sc8LJhvJmWbUCUKqj8=
 github.com/containerd/cgroups v1.0.4 h1:jN/mbWBEaz+T1pi5OFtnkQ+8qnmEbAr1Oo1FRm5B0dA=
 github.com/containerd/cgroups v1.0.4/go.mod h1:nLNQtsF7Sl2HxNebu77i1R0oDlhiTG+kO4JTrUzo6IA=
 github.com/containerd/console v0.0.0-20191206165004-02ecf6a7291e/go.mod h1:8Pf4gM6VEbTNRIT26AyyU7hxdQU3MvAvxVI0sc00XBE=
@@ -356,8 +357,8 @@ github.com/google/pprof v0.0.0-20210407192527-94a9f03dee38/go.mod h1:kpwsk12EmLe
 github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI=
 github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
 github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
-github.com/google/uuid v1.2.0 h1:qJYtXnJRWmpe7m/3XlyhrsLrEURqHRM2kxzoxXqyUDs=
-github.com/google/uuid v1.2.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
+github.com/google/uuid v1.3.0 h1:t6JiXgmwXMjEs8VusXIJk2BXHsn+wx8BZdTaoZ5fu7I=
+github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
 github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg=
 github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk=
 github.com/googleapis/gnostic v0.4.1/go.mod h1:LRhVm6pbyptWbWbuZ38d1eyptfvIytN3ir6b65WBswg=
@@ -940,6 +941,7 @@ golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBc
 golang.org/x/sys v0.0.0-20220114195835-da31bd327af9/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 golang.org/x/sys v0.0.0-20220209214540-3681064d5158/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 golang.org/x/sys v0.0.0-20220405210540-1e041c57c461/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20220412211240-33da011f77ad/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a h1:dGzPydgVsqGcTRVwiLJ1jVbufYwmzD3LfVPLKsKg+0k=
 golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
vendor/github.com/Microsoft/hcsshim/.gitattributes (2 changes; generated, vendored)
@@ -1 +1,3 @@
 * text=auto eol=lf
+vendor/** -text
+test/vendor/** -text
vendor/github.com/Microsoft/hcsshim/.gitignore (11 changes; generated, vendored)
@@ -6,6 +6,7 @@
 
 # Ignore vscode setting files
 .vscode/
+.idea/
 
 # Test binary, build with `go test -c`
 *.test
@@ -23,16 +24,22 @@ service/pkg/
 *.img
 *.vhd
 *.tar.gz
+*.tar
 
 # Make stuff
 .rootfs-done
 bin/*
 rootfs/*
+rootfs-conv/*
 *.o
 /build/
 
 deps/*
 out/*
 
-.idea/
-.vscode/
+# test results
+test/results
+
+# go workspace files
+go.work
+go.work.sum
vendor/github.com/Microsoft/hcsshim/.golangci.yml (11 changes; generated, vendored)
@@ -3,6 +3,7 @@ run:
 
 linters:
   enable:
+    - gofmt
     - stylecheck
 
 linters-settings:
@@ -28,6 +29,16 @@ issues:
         - stylecheck
       Text: "ST1003:"
 
+    - path: cmd\\ncproxy\\nodenetsvc\\
+      linters:
+        - stylecheck
+      Text: "ST1003:"
+
+    - path: cmd\\ncproxy_mock\\
+      linters:
+        - stylecheck
+      Text: "ST1003:"
+
     - path: internal\\hcs\\schema2\\
       linters:
         - stylecheck
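The vendored `.golangci.yml` now enables `gofmt` alongside `stylecheck` and adds ST1003 exclusions for the ncproxy nodenetsvc and ncproxy_mock paths. As an illustration only (this invocation is not part of the commit; golangci-lint picks the config file up automatically when run from the module root):

```sh
golangci-lint run ./...
```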
vendor/github.com/Microsoft/hcsshim/Makefile (72 changes; generated, vendored)
@@ -1,4 +1,5 @@
 BASE:=base.tar.gz
+DEV_BUILD:=0
 
 GO:=go
 GO_FLAGS:=-ldflags "-s -w" # strip Go binaries
@@ -15,13 +16,24 @@ endif
 GO_BUILD:=CGO_ENABLED=$(CGO_ENABLED) $(GO) build $(GO_FLAGS) $(GO_FLAGS_EXTRA)
 
 SRCROOT=$(dir $(abspath $(firstword $(MAKEFILE_LIST))))
+# additional directories to search for rule prerequisites and targets
+VPATH=$(SRCROOT)
+
+DELTA_TARGET=out/delta.tar.gz
+
+ifeq "$(DEV_BUILD)" "1"
+DELTA_TARGET=out/delta-dev.tar.gz
+endif
 
 # The link aliases for gcstools
 GCS_TOOLS=\
-	generichook
+	generichook \
+	install-drivers
 
 .PHONY: all always rootfs test
 
+.DEFAULT_GOAL := all
+
 all: out/initrd.img out/rootfs.tar.gz
 
 clean:
@@ -29,21 +41,13 @@ clean:
 	rm -rf bin deps rootfs out
 
 test:
-	cd $(SRCROOT) && go test -v ./internal/guest/...
+	cd $(SRCROOT) && $(GO) test -v ./internal/guest/...
 
-out/delta.tar.gz: bin/init bin/vsockexec bin/cmd/gcs bin/cmd/gcstools Makefile
-	@mkdir -p out
-	rm -rf rootfs
-	mkdir -p rootfs/bin/
-	cp bin/init rootfs/
-	cp bin/vsockexec rootfs/bin/
-	cp bin/cmd/gcs rootfs/bin/
-	cp bin/cmd/gcstools rootfs/bin/
-	for tool in $(GCS_TOOLS); do ln -s gcstools rootfs/bin/$$tool; done
-	git -C $(SRCROOT) rev-parse HEAD > rootfs/gcs.commit && \
-	git -C $(SRCROOT) rev-parse --abbrev-ref HEAD > rootfs/gcs.branch
-	tar -zcf $@ -C rootfs .
-	rm -rf rootfs
+rootfs: out/rootfs.vhd
+
+out/rootfs.vhd: out/rootfs.tar.gz bin/cmd/tar2ext4
+	gzip -f -d ./out/rootfs.tar.gz
+	bin/cmd/tar2ext4 -vhd -i ./out/rootfs.tar -o $@
 
 out/rootfs.tar.gz: out/initrd.img
 	rm -rf rootfs-conv
@@ -52,13 +56,45 @@ out/rootfs.tar.gz: out/initrd.img
 	tar -zcf $@ -C rootfs-conv .
 	rm -rf rootfs-conv
 
-out/initrd.img: $(BASE) out/delta.tar.gz $(SRCROOT)/hack/catcpio.sh
-	$(SRCROOT)/hack/catcpio.sh "$(BASE)" out/delta.tar.gz > out/initrd.img.uncompressed
+out/initrd.img: $(BASE) $(DELTA_TARGET) $(SRCROOT)/hack/catcpio.sh
+	$(SRCROOT)/hack/catcpio.sh "$(BASE)" $(DELTA_TARGET) > out/initrd.img.uncompressed
 	gzip -c out/initrd.img.uncompressed > $@
 	rm out/initrd.img.uncompressed
 
+# This target includes utilities which may be useful for testing purposes.
+out/delta-dev.tar.gz: out/delta.tar.gz bin/internal/tools/snp-report
+	rm -rf rootfs-dev
+	mkdir rootfs-dev
+	tar -xzf out/delta.tar.gz -C rootfs-dev
+	cp bin/internal/tools/snp-report rootfs-dev/bin/
+	tar -zcf $@ -C rootfs-dev .
+	rm -rf rootfs-dev
+
+out/delta.tar.gz: bin/init bin/vsockexec bin/cmd/gcs bin/cmd/gcstools bin/cmd/hooks/wait-paths Makefile
+	@mkdir -p out
+	rm -rf rootfs
+	mkdir -p rootfs/bin/
+	mkdir -p rootfs/info/
+	cp bin/init rootfs/
+	cp bin/vsockexec rootfs/bin/
+	cp bin/cmd/gcs rootfs/bin/
+	cp bin/cmd/gcstools rootfs/bin/
+	cp bin/cmd/hooks/wait-paths rootfs/bin/
+	for tool in $(GCS_TOOLS); do ln -s gcstools rootfs/bin/$$tool; done
+	git -C $(SRCROOT) rev-parse HEAD > rootfs/info/gcs.commit && \
+	git -C $(SRCROOT) rev-parse --abbrev-ref HEAD > rootfs/info/gcs.branch && \
+	date --iso-8601=minute --utc > rootfs/info/tar.date
+	$(if $(and $(realpath $(subst .tar,.testdata.json,$(BASE))), $(shell which jq)), \
+	jq -r '.IMAGE_NAME' $(subst .tar,.testdata.json,$(BASE)) 2>/dev/null > rootfs/info/image.name && \
+	jq -r '.DATETIME' $(subst .tar,.testdata.json,$(BASE)) 2>/dev/null > rootfs/info/build.date)
+	tar -zcf $@ -C rootfs .
+	rm -rf rootfs
+
 -include deps/cmd/gcs.gomake
 -include deps/cmd/gcstools.gomake
+-include deps/cmd/hooks/wait-paths.gomake
+-include deps/cmd/tar2ext4.gomake
+-include deps/internal/tools/snp-report.gomake
 
 # Implicit rule for includes that define Go targets.
 %.gomake: $(SRCROOT)/Makefile
@@ -72,8 +108,6 @@ out/initrd.img: $(BASE) out/delta.tar.gz $(SRCROOT)/hack/catcpio.sh
 	@/bin/echo -e '-include $(@:%.gomake=%.godeps)' >> $@.new
 	mv $@.new $@
 
-VPATH=$(SRCROOT)
-
 bin/vsockexec: vsockexec/vsockexec.o vsockexec/vsock.o
 	@mkdir -p bin
 	$(CC) $(LDFLAGS) -o $@ $^
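The reworked GCS Makefile above adds a `DEV_BUILD` switch, a `rootfs`/`out/rootfs.vhd` target, and `.DEFAULT_GOAL := all`. The invocations below are a sketch based only on the targets visible in this diff, not commands recorded in the commit:

```sh
make                  # default goal "all": builds out/initrd.img and out/rootfs.tar.gz
make rootfs           # converts out/rootfs.tar.gz into out/rootfs.vhd via bin/cmd/tar2ext4
make DEV_BUILD=1 all  # builds the initrd from out/delta-dev.tar.gz (includes snp-report)
make test             # runs $(GO) test -v ./internal/guest/...
```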
vendor/github.com/Microsoft/hcsshim/Protobuild.toml (11 changes; generated, vendored)
@@ -1,4 +1,4 @@
-version = "unstable"
+version = "1"
 generator = "gogoctrd"
 plugins = ["grpc", "fieldpath"]
 
@@ -14,11 +14,6 @@ plugins = ["grpc", "fieldpath"]
 # target package.
 packages = ["github.com/gogo/protobuf"]
 
-# Paths that will be added untouched to the end of the includes. We use
-# `/usr/local/include` to pickup the common install location of protobuf.
-# This is the default.
-after = ["/usr/local/include"]
-
 # This section maps protobuf imports to Go packages. These will become
 # `-M` directives in the call to the go protobuf generator.
 [packages]
@@ -36,6 +31,10 @@ plugins = ["grpc", "fieldpath"]
 prefixes = ["github.com/Microsoft/hcsshim/internal/shimdiag"]
 plugins = ["ttrpc"]
 
+[[overrides]]
+prefixes = ["github.com/Microsoft/hcsshim/internal/extendedtask"]
+plugins = ["ttrpc"]
+
 [[overrides]]
 prefixes = ["github.com/Microsoft/hcsshim/internal/computeagent"]
 plugins = ["ttrpc"]
vendor/github.com/Microsoft/hcsshim/README.md (20 changes; generated, vendored)
@@ -75,24 +75,6 @@ certify they either authored the work themselves or otherwise have permission to
 more info, as well as to make sure that you can attest to the rules listed. Our CI uses the [DCO Github app](https://github.com/apps/dco) to ensure
 that all commits in a given PR are signed-off.
 
-### Test Directory (Important to note)
-
-This project has tried to trim some dependencies from the root Go modules file that would be cumbersome to get transitively included if this
-project is being vendored/used as a library. Some of these dependencies were only being used for tests, so the /test directory in this project also has
-its own go.mod file where these are now included to get around this issue. Our tests rely on the code in this project to run, so the test Go modules file
-has a relative path replace directive to pull in the latest hcsshim code that the tests actually touch from this project
-(which is the repo itself on your disk).
-
-```
-replace (
-	github.com/Microsoft/hcsshim => ../
-)
-```
-
-Because of this, for most code changes you may need to run `go mod vendor` + `go mod tidy` in the /test directory in this repository, as the
-CI in this project will check if the files are out of date and will fail if this is true.
-
 ## Code of Conduct
 
 This project has adopted the [Microsoft Open Source Code of Conduct](https://opensource.microsoft.com/codeofconduct/).
@@ -101,7 +83,7 @@ contact [opencode@microsoft.com](mailto:opencode@microsoft.com) with any additio
 
 ## Dependencies
 
-This project requires Golang 1.9 or newer to build.
+This project requires Golang 1.17 or newer to build.
 
 For system requirements to run this project, see the Microsoft docs on [Windows Container requirements](https://docs.microsoft.com/en-us/virtualization/windowscontainers/deploy-containers/system-requirements).
vendor/github.com/Microsoft/hcsshim/SECURITY.md (41 changes; generated, vendored; new file)
@@ -0,0 +1,41 @@
+<!-- BEGIN MICROSOFT SECURITY.MD V0.0.7 BLOCK -->
+
+## Security
+
+Microsoft takes the security of our software products and services seriously, which includes all source code repositories managed through our GitHub organizations, which include [Microsoft](https://github.com/Microsoft), [Azure](https://github.com/Azure), [DotNet](https://github.com/dotnet), [AspNet](https://github.com/aspnet), [Xamarin](https://github.com/xamarin), and [our GitHub organizations](https://opensource.microsoft.com/).
+
+If you believe you have found a security vulnerability in any Microsoft-owned repository that meets [Microsoft's definition of a security vulnerability](https://aka.ms/opensource/security/definition), please report it to us as described below.
+
+## Reporting Security Issues
+
+**Please do not report security vulnerabilities through public GitHub issues.**
+
+Instead, please report them to the Microsoft Security Response Center (MSRC) at [https://msrc.microsoft.com/create-report](https://aka.ms/opensource/security/create-report).
+
+If you prefer to submit without logging in, send email to [secure@microsoft.com](mailto:secure@microsoft.com). If possible, encrypt your message with our PGP key; please download it from the [Microsoft Security Response Center PGP Key page](https://aka.ms/opensource/security/pgpkey).
+
+You should receive a response within 24 hours. If for some reason you do not, please follow up via email to ensure we received your original message. Additional information can be found at [microsoft.com/msrc](https://aka.ms/opensource/security/msrc).
+
+Please include the requested information listed below (as much as you can provide) to help us better understand the nature and scope of the possible issue:
+
+  * Type of issue (e.g. buffer overflow, SQL injection, cross-site scripting, etc.)
+  * Full paths of source file(s) related to the manifestation of the issue
+  * The location of the affected source code (tag/branch/commit or direct URL)
+  * Any special configuration required to reproduce the issue
+  * Step-by-step instructions to reproduce the issue
+  * Proof-of-concept or exploit code (if possible)
+  * Impact of the issue, including how an attacker might exploit the issue
+
+This information will help us triage your report more quickly.
+
+If you are reporting for a bug bounty, more complete reports can contribute to a higher bounty award. Please visit our [Microsoft Bug Bounty Program](https://aka.ms/opensource/security/bounty) page for more details about our active programs.
+
+## Preferred Languages
+
+We prefer all communications to be in English.
+
+## Policy
+
+Microsoft follows the principle of [Coordinated Vulnerability Disclosure](https://aka.ms/opensource/security/cvd).
+
+<!-- END MICROSOFT SECURITY.MD BLOCK -->
vendor/github.com/Microsoft/hcsshim/cmd/containerd-shim-runhcs-v1/options/runhcs.pb.go (800 changes; generated, vendored)
@@ -6,10 +6,12 @@ package options
 import (
 	fmt "fmt"
 	proto "github.com/gogo/protobuf/proto"
+	github_com_gogo_protobuf_sortkeys "github.com/gogo/protobuf/sortkeys"
 	_ "github.com/gogo/protobuf/types"
 	github_com_gogo_protobuf_types "github.com/gogo/protobuf/types"
 	io "io"
 	math "math"
+	math_bits "math/bits"
 	reflect "reflect"
 	strings "strings"
 	time "time"
@@ -25,7 +27,7 @@ var _ = time.Kitchen
 // is compatible with the proto package it is being compiled against.
 // A compilation error at this line likely means your copy of the
 // proto package needs to be updated.
-const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package
+const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package
 
 type Options_DebugType int32
 
@@ -144,6 +146,13 @@ type Options struct {
 	// The typical example is if Containerd has restarted but is expected to come back online. A 0 for this field is interpreted as an infinite
 	// timeout.
 	IoRetryTimeoutInSec int32 `protobuf:"varint,17,opt,name=io_retry_timeout_in_sec,json=ioRetryTimeoutInSec,proto3" json:"io_retry_timeout_in_sec,omitempty"`
+	// default_container_annotations specifies a set of annotations that should be set for every workload container
+	DefaultContainerAnnotations map[string]string `protobuf:"bytes,18,rep,name=default_container_annotations,json=defaultContainerAnnotations,proto3" json:"default_container_annotations,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
+	// no_inherit_host_timezone specifies to skip inheriting the hosts time zone for WCOW UVMs and instead default to
+	// UTC.
+	NoInheritHostTimezone bool `protobuf:"varint,19,opt,name=no_inherit_host_timezone,json=noInheritHostTimezone,proto3" json:"no_inherit_host_timezone,omitempty"`
+	// scrub_logs enables removing environment variables and other potentially sensitive information from logs
+	ScrubLogs bool `protobuf:"varint,20,opt,name=scrub_logs,json=scrubLogs,proto3" json:"scrub_logs,omitempty"`
 	XXX_NoUnkeyedLiteral struct{} `json:"-"`
 	XXX_unrecognized     []byte   `json:"-"`
 	XXX_sizecache        int32    `json:"-"`
@@ -162,7 +171,7 @@ func (m *Options) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
 		return xxx_messageInfo_Options.Marshal(b, m, deterministic)
 	} else {
 		b = b[:cap(b)]
-		n, err := m.MarshalTo(b)
+		n, err := m.MarshalToSizedBuffer(b)
 		if err != nil {
 			return nil, err
 		}
@@ -211,7 +220,7 @@ func (m *ProcessDetails) XXX_Marshal(b []byte, deterministic bool) ([]byte, erro
 		return xxx_messageInfo_ProcessDetails.Marshal(b, m, deterministic)
 	} else {
 		b = b[:cap(b)]
-		n, err := m.MarshalTo(b)
+		n, err := m.MarshalToSizedBuffer(b)
 		if err != nil {
 			return nil, err
 		}
@@ -234,6 +243,7 @@ func init() {
 	proto.RegisterEnum("containerd.runhcs.v1.Options_DebugType", Options_DebugType_name, Options_DebugType_value)
 	proto.RegisterEnum("containerd.runhcs.v1.Options_SandboxIsolation", Options_SandboxIsolation_name, Options_SandboxIsolation_value)
 	proto.RegisterType((*Options)(nil), "containerd.runhcs.v1.Options")
+	proto.RegisterMapType((map[string]string)(nil), "containerd.runhcs.v1.Options.DefaultContainerAnnotationsEntry")
 	proto.RegisterType((*ProcessDetails)(nil), "containerd.runhcs.v1.ProcessDetails")
 }
 
@@ -242,73 +252,80 @@
 }
 
 var fileDescriptor_b643df6839c75082 = []byte{
-	// 953 bytes of a gzipped FileDescriptorProto
+	// 1072 bytes of a gzipped FileDescriptorProto
 	// (raw gzipped descriptor byte literals regenerated; not reproduced here)
 }
 
 func (m *Options) Marshal() (dAtA []byte, err error) {
 	size := m.Size()
 	dAtA = make([]byte, size)
-	n, err := m.MarshalTo(dAtA)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
 	if err != nil {
 		return nil, err
 	}
@@ -316,131 +333,189 @@ func (m *Options) Marshal() (dAtA []byte, err error) {
 }
 
 func (m *Options) MarshalTo(dAtA []byte) (int, error) {
-	var i int
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *Options) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
 	_ = i
 	var l int
 	_ = l
-	if m.Debug {
+	if m.XXX_unrecognized != nil {
-		dAtA[i] = 0x8
+		i -= len(m.XXX_unrecognized)
-		i++
+		copy(dAtA[i:], m.XXX_unrecognized)
-		if m.Debug {
+	}
+	if m.ScrubLogs {
+		i--
+		if m.ScrubLogs {
 			dAtA[i] = 1
 		} else {
 			dAtA[i] = 0
 		}
-		i++
+		i--
-	}
-	if m.DebugType != 0 {
-		dAtA[i] = 0x10
-		i++
-		i = encodeVarintRunhcs(dAtA, i, uint64(m.DebugType))
-	}
-	if len(m.RegistryRoot) > 0 {
-		dAtA[i] = 0x1a
-		i++
-		i = encodeVarintRunhcs(dAtA, i, uint64(len(m.RegistryRoot)))
-		i += copy(dAtA[i:], m.RegistryRoot)
-	}
-	if len(m.SandboxImage) > 0 {
-		dAtA[i] = 0x22
-		i++
-		i = encodeVarintRunhcs(dAtA, i, uint64(len(m.SandboxImage)))
-		i += copy(dAtA[i:], m.SandboxImage)
-	}
-	if len(m.SandboxPlatform) > 0 {
-		dAtA[i] = 0x2a
-		i++
-		i = encodeVarintRunhcs(dAtA, i, uint64(len(m.SandboxPlatform)))
-		i += copy(dAtA[i:], m.SandboxPlatform)
-	}
-	if m.SandboxIsolation != 0 {
-		dAtA[i] = 0x30
-		i++
-		i = encodeVarintRunhcs(dAtA, i, uint64(m.SandboxIsolation))
-	}
-	if len(m.BootFilesRootPath) > 0 {
-		dAtA[i] = 0x3a
-		i++
-		i = encodeVarintRunhcs(dAtA, i, uint64(len(m.BootFilesRootPath)))
-		i += copy(dAtA[i:], m.BootFilesRootPath)
-	}
-	if m.VmProcessorCount != 0 {
-		dAtA[i] = 0x40
-		i++
-		i = encodeVarintRunhcs(dAtA, i, uint64(m.VmProcessorCount))
-	}
-	if m.VmMemorySizeInMb != 0 {
-		dAtA[i] = 0x48
-		i++
-		i = encodeVarintRunhcs(dAtA, i, uint64(m.VmMemorySizeInMb))
-	}
-	if len(m.GPUVHDPath) > 0 {
-		dAtA[i] = 0x52
-		i++
-		i = encodeVarintRunhcs(dAtA, i, uint64(len(m.GPUVHDPath)))
-		i += copy(dAtA[i:], m.GPUVHDPath)
-	}
-	if m.ScaleCpuLimitsToSandbox {
-		dAtA[i] = 0x58
-		i++
-		if m.ScaleCpuLimitsToSandbox {
-			dAtA[i] = 1
-		} else {
-			dAtA[i] = 0
-		}
-		i++
-	}
-	if m.DefaultContainerScratchSizeInGb != 0 {
-		dAtA[i] = 0x60
-		i++
-		i = encodeVarintRunhcs(dAtA, i, uint64(m.DefaultContainerScratchSizeInGb))
-	}
-	if m.DefaultVmScratchSizeInGb != 0 {
-		dAtA[i] = 0x68
-		i++
-		i = encodeVarintRunhcs(dAtA, i, uint64(m.DefaultVmScratchSizeInGb))
-	}
-	if m.ShareScratch {
-		dAtA[i] = 0x70
-		i++
-		if m.ShareScratch {
-			dAtA[i] = 1
-		} else {
-			dAtA[i] = 0
-		}
-		i++
-	}
-	if len(m.NCProxyAddr) > 0 {
-		dAtA[i] = 0x7a
-		i++
-		i = encodeVarintRunhcs(dAtA, i, uint64(len(m.NCProxyAddr)))
-		i += copy(dAtA[i:], m.NCProxyAddr)
-	}
-	if len(m.LogLevel) > 0 {
-		dAtA[i] = 0x82
-		i++
 		dAtA[i] = 0x1
-		i++
+		i--
-		i = encodeVarintRunhcs(dAtA, i, uint64(len(m.LogLevel)))
+		dAtA[i] = 0xa0
-		i += copy(dAtA[i:], m.LogLevel)
+	}
+	if m.NoInheritHostTimezone {
+		i--
+		if m.NoInheritHostTimezone {
+			dAtA[i] = 1
+		} else {
+			dAtA[i] = 0
+		}
+		i--
+		dAtA[i] = 0x1
+		i--
+		dAtA[i] = 0x98
+	}
+	if len(m.DefaultContainerAnnotations) > 0 {
+		for k := range m.DefaultContainerAnnotations {
+			v := m.DefaultContainerAnnotations[k]
+			baseI := i
+			i -= len(v)
+			copy(dAtA[i:], v)
+			i = encodeVarintRunhcs(dAtA, i, uint64(len(v)))
+			i--
+			dAtA[i] = 0x12
+			i -= len(k)
+			copy(dAtA[i:], k)
+			i = encodeVarintRunhcs(dAtA, i, uint64(len(k)))
+			i--
+			dAtA[i] = 0xa
+			i = encodeVarintRunhcs(dAtA, i, uint64(baseI-i))
+			i--
+			dAtA[i] = 0x1
+			i--
+			dAtA[i] = 0x92
+		}
 	}
 	if m.IoRetryTimeoutInSec != 0 {
-		dAtA[i] = 0x88
-		i++
-		dAtA[i] = 0x1
-		i++
 		i = encodeVarintRunhcs(dAtA, i, uint64(m.IoRetryTimeoutInSec))
+		i--
+		dAtA[i] = 0x1
+		i--
+		dAtA[i] = 0x88
 	}
-	if m.XXX_unrecognized != nil {
+	if len(m.LogLevel) > 0 {
-		i += copy(dAtA[i:], m.XXX_unrecognized)
+		i -= len(m.LogLevel)
+		copy(dAtA[i:], m.LogLevel)
+		i = encodeVarintRunhcs(dAtA, i, uint64(len(m.LogLevel)))
+		i--
+		dAtA[i] = 0x1
+		i--
+		dAtA[i] = 0x82
 	}
-	return i, nil
+	if len(m.NCProxyAddr) > 0 {
+		i -= len(m.NCProxyAddr)
+		copy(dAtA[i:], m.NCProxyAddr)
+		i = encodeVarintRunhcs(dAtA, i, uint64(len(m.NCProxyAddr)))
+		i--
+		dAtA[i] = 0x7a
+	}
+	if m.ShareScratch {
+		i--
+		if m.ShareScratch {
+			dAtA[i] = 1
+		} else {
+			dAtA[i] = 0
+		}
+		i--
+		dAtA[i] = 0x70
+	}
+	if m.DefaultVmScratchSizeInGb != 0 {
+		i = encodeVarintRunhcs(dAtA, i, uint64(m.DefaultVmScratchSizeInGb))
+		i--
+		dAtA[i] = 0x68
+	}
+	if m.DefaultContainerScratchSizeInGb != 0 {
+		i = encodeVarintRunhcs(dAtA, i, uint64(m.DefaultContainerScratchSizeInGb))
+		i--
+		dAtA[i] = 0x60
+	}
+	if m.ScaleCpuLimitsToSandbox {
+		i--
+		if m.ScaleCpuLimitsToSandbox {
+			dAtA[i] = 1
+		} else {
+			dAtA[i] = 0
+		}
+		i--
+		dAtA[i] = 0x58
+	}
+	if len(m.GPUVHDPath) > 0 {
+		i -= len(m.GPUVHDPath)
+		copy(dAtA[i:], m.GPUVHDPath)
+		i = encodeVarintRunhcs(dAtA, i, uint64(len(m.GPUVHDPath)))
+		i--
+		dAtA[i] = 0x52
+	}
+	if m.VmMemorySizeInMb != 0 {
+		i = encodeVarintRunhcs(dAtA, i, uint64(m.VmMemorySizeInMb))
+		i--
+		dAtA[i] = 0x48
+	}
+	if m.VmProcessorCount != 0 {
+		i = encodeVarintRunhcs(dAtA, i, uint64(m.VmProcessorCount))
+		i--
+		dAtA[i] = 0x40
+	}
+	if len(m.BootFilesRootPath) > 0 {
+		i -= len(m.BootFilesRootPath)
+		copy(dAtA[i:], m.BootFilesRootPath)
+		i = encodeVarintRunhcs(dAtA, i, uint64(len(m.BootFilesRootPath)))
+		i--
+		dAtA[i] = 0x3a
+	}
+	if m.SandboxIsolation != 0 {
+		i = encodeVarintRunhcs(dAtA, i, uint64(m.SandboxIsolation))
+		i--
+		dAtA[i] = 0x30
+	}
+	if len(m.SandboxPlatform) > 0 {
+		i -= len(m.SandboxPlatform)
+		copy(dAtA[i:], m.SandboxPlatform)
+		i = encodeVarintRunhcs(dAtA, i, uint64(len(m.SandboxPlatform)))
+		i--
+		dAtA[i] = 0x2a
+	}
+	if len(m.SandboxImage) > 0 {
+		i -= len(m.SandboxImage)
+		copy(dAtA[i:], m.SandboxImage)
+		i = encodeVarintRunhcs(dAtA, i, uint64(len(m.SandboxImage)))
+		i--
+		dAtA[i] = 0x22
+	}
+	if len(m.RegistryRoot) > 0 {
+		i -= len(m.RegistryRoot)
+		copy(dAtA[i:], m.RegistryRoot)
+		i = encodeVarintRunhcs(dAtA, i, uint64(len(m.RegistryRoot)))
+		i--
+		dAtA[i] = 0x1a
+	}
+	if m.DebugType != 0 {
+		i = encodeVarintRunhcs(dAtA, i, uint64(m.DebugType))
+		i--
+		dAtA[i] = 0x10
+	}
+	if m.Debug {
+		i--
+		if m.Debug {
+			dAtA[i] = 1
+		} else {
+			dAtA[i] = 0
+		}
+		i--
+		dAtA[i] = 0x8
+	}
+	return len(dAtA) - i, nil
 }
 
 func (m *ProcessDetails) Marshal() (dAtA []byte, err error) {
 	size := m.Size()
 	dAtA = make([]byte, size)
-	n, err := m.MarshalTo(dAtA)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
 	if err != nil {
 		return nil, err
 	}
@ -448,74 +523,84 @@ func (m *ProcessDetails) Marshal() (dAtA []byte, err error) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
func (m *ProcessDetails) MarshalTo(dAtA []byte) (int, error) {
|
func (m *ProcessDetails) MarshalTo(dAtA []byte) (int, error) {
|
||||||
var i int
|
size := m.Size()
|
||||||
|
return m.MarshalToSizedBuffer(dAtA[:size])
|
||||||
|
}
|
||||||
|
|
||||||
|
func (m *ProcessDetails) MarshalToSizedBuffer(dAtA []byte) (int, error) {
|
||||||
|
i := len(dAtA)
|
||||||
_ = i
|
_ = i
|
||||||
var l int
|
var l int
|
||||||
_ = l
|
_ = l
|
||||||
if len(m.ImageName) > 0 {
|
if m.XXX_unrecognized != nil {
|
||||||
dAtA[i] = 0xa
|
i -= len(m.XXX_unrecognized)
|
||||||
i++
|
copy(dAtA[i:], m.XXX_unrecognized)
|
||||||
i = encodeVarintRunhcs(dAtA, i, uint64(len(m.ImageName)))
|
|
||||||
i += copy(dAtA[i:], m.ImageName)
|
|
||||||
}
|
|
||||||
dAtA[i] = 0x12
|
|
||||||
i++
|
|
||||||
i = encodeVarintRunhcs(dAtA, i, uint64(github_com_gogo_protobuf_types.SizeOfStdTime(m.CreatedAt)))
|
|
||||||
n1, err := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.CreatedAt, dAtA[i:])
|
|
||||||
if err != nil {
|
|
||||||
return 0, err
|
|
||||||
}
|
|
||||||
i += n1
|
|
||||||
if m.KernelTime_100Ns != 0 {
|
|
||||||
dAtA[i] = 0x18
|
|
||||||
i++
|
|
||||||
i = encodeVarintRunhcs(dAtA, i, uint64(m.KernelTime_100Ns))
|
|
||||||
}
|
|
||||||
if m.MemoryCommitBytes != 0 {
|
|
||||||
dAtA[i] = 0x20
|
|
||||||
i++
|
|
||||||
i = encodeVarintRunhcs(dAtA, i, uint64(m.MemoryCommitBytes))
|
|
||||||
}
|
|
||||||
if m.MemoryWorkingSetPrivateBytes != 0 {
|
|
||||||
dAtA[i] = 0x28
|
|
||||||
i++
|
|
||||||
i = encodeVarintRunhcs(dAtA, i, uint64(m.MemoryWorkingSetPrivateBytes))
|
|
||||||
}
|
|
||||||
if m.MemoryWorkingSetSharedBytes != 0 {
|
|
||||||
dAtA[i] = 0x30
|
|
||||||
i++
|
|
||||||
i = encodeVarintRunhcs(dAtA, i, uint64(m.MemoryWorkingSetSharedBytes))
|
|
||||||
}
|
|
||||||
if m.ProcessID != 0 {
|
|
||||||
dAtA[i] = 0x38
|
|
||||||
i++
|
|
||||||
i = encodeVarintRunhcs(dAtA, i, uint64(m.ProcessID))
|
|
||||||
}
|
|
||||||
if m.UserTime_100Ns != 0 {
|
|
||||||
dAtA[i] = 0x40
|
|
||||||
i++
|
|
||||||
i = encodeVarintRunhcs(dAtA, i, uint64(m.UserTime_100Ns))
|
|
||||||
}
|
}
|
||||||
if len(m.ExecID) > 0 {
|
if len(m.ExecID) > 0 {
|
||||||
dAtA[i] = 0x4a
|
i -= len(m.ExecID)
|
||||||
i++
|
copy(dAtA[i:], m.ExecID)
|
||||||
i = encodeVarintRunhcs(dAtA, i, uint64(len(m.ExecID)))
|
i = encodeVarintRunhcs(dAtA, i, uint64(len(m.ExecID)))
|
||||||
i += copy(dAtA[i:], m.ExecID)
|
i--
|
||||||
|
dAtA[i] = 0x4a
|
||||||
}
|
}
|
||||||
if m.XXX_unrecognized != nil {
|
if m.UserTime_100Ns != 0 {
|
||||||
i += copy(dAtA[i:], m.XXX_unrecognized)
|
i = encodeVarintRunhcs(dAtA, i, uint64(m.UserTime_100Ns))
|
||||||
|
i--
|
||||||
|
dAtA[i] = 0x40
|
||||||
}
|
}
|
||||||
return i, nil
|
if m.ProcessID != 0 {
|
||||||
|
i = encodeVarintRunhcs(dAtA, i, uint64(m.ProcessID))
|
||||||
|
i--
|
||||||
|
dAtA[i] = 0x38
|
||||||
|
}
|
||||||
|
if m.MemoryWorkingSetSharedBytes != 0 {
|
||||||
|
i = encodeVarintRunhcs(dAtA, i, uint64(m.MemoryWorkingSetSharedBytes))
|
||||||
|
i--
|
||||||
|
dAtA[i] = 0x30
|
||||||
|
}
|
||||||
|
if m.MemoryWorkingSetPrivateBytes != 0 {
|
||||||
|
i = encodeVarintRunhcs(dAtA, i, uint64(m.MemoryWorkingSetPrivateBytes))
|
||||||
|
i--
|
||||||
|
dAtA[i] = 0x28
|
||||||
|
}
|
||||||
|
if m.MemoryCommitBytes != 0 {
|
||||||
|
i = encodeVarintRunhcs(dAtA, i, uint64(m.MemoryCommitBytes))
|
||||||
|
i--
|
||||||
|
dAtA[i] = 0x20
|
||||||
|
}
|
||||||
|
if m.KernelTime_100Ns != 0 {
|
||||||
|
i = encodeVarintRunhcs(dAtA, i, uint64(m.KernelTime_100Ns))
|
||||||
|
i--
|
||||||
|
dAtA[i] = 0x18
|
||||||
|
}
|
||||||
|
n1, err1 := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.CreatedAt, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdTime(m.CreatedAt):])
|
||||||
|
if err1 != nil {
|
||||||
|
return 0, err1
|
||||||
|
}
|
||||||
|
i -= n1
|
||||||
|
i = encodeVarintRunhcs(dAtA, i, uint64(n1))
|
||||||
|
i--
|
||||||
|
dAtA[i] = 0x12
|
||||||
|
if len(m.ImageName) > 0 {
|
||||||
|
i -= len(m.ImageName)
|
||||||
|
copy(dAtA[i:], m.ImageName)
|
||||||
|
i = encodeVarintRunhcs(dAtA, i, uint64(len(m.ImageName)))
|
||||||
|
i--
|
||||||
|
dAtA[i] = 0xa
|
||||||
|
}
|
||||||
|
return len(dAtA) - i, nil
|
||||||
}
|
}

 func encodeVarintRunhcs(dAtA []byte, offset int, v uint64) int {
+	offset -= sovRunhcs(v)
+	base := offset
 	for v >= 1<<7 {
 		dAtA[offset] = uint8(v&0x7f | 0x80)
 		v >>= 7
 		offset++
 	}
 	dAtA[offset] = uint8(v)
-	return offset + 1
+	return base
 }
 func (m *Options) Size() (n int) {
 	if m == nil {
@ -581,6 +666,20 @@ func (m *Options) Size() (n int) {
 	if m.IoRetryTimeoutInSec != 0 {
 		n += 2 + sovRunhcs(uint64(m.IoRetryTimeoutInSec))
 	}
+	if len(m.DefaultContainerAnnotations) > 0 {
+		for k, v := range m.DefaultContainerAnnotations {
+			_ = k
+			_ = v
+			mapEntrySize := 1 + len(k) + sovRunhcs(uint64(len(k))) + 1 + len(v) + sovRunhcs(uint64(len(v)))
+			n += mapEntrySize + 2 + sovRunhcs(uint64(mapEntrySize))
+		}
+	}
+	if m.NoInheritHostTimezone {
+		n += 3
+	}
+	if m.ScrubLogs {
+		n += 3
+	}
 	if m.XXX_unrecognized != nil {
 		n += len(m.XXX_unrecognized)
 	}
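The regenerated marshal code fills the buffer back to front: each varint is written so that its last byte lands just before the data already emitted, and the new (smaller) offset is returned. A minimal standalone sketch of that backward-writing idea, not part of the generated file:

package main

import (
	"fmt"
	"math/bits"
)

// sov returns the byte length of the varint encoding of v, mirroring sovRunhcs.
func sov(v uint64) int { return (bits.Len64(v|1) + 6) / 7 }

// putVarintBackward writes v so that its encoding ends at offset and returns
// the new offset, mirroring encodeVarintRunhcs above.
func putVarintBackward(dAtA []byte, offset int, v uint64) int {
	offset -= sov(v)
	base := offset
	for v >= 1<<7 {
		dAtA[offset] = uint8(v&0x7f | 0x80)
		v >>= 7
		offset++
	}
	dAtA[offset] = uint8(v)
	return base
}

func main() {
	buf := make([]byte, 16)
	i := len(buf)
	i = putVarintBackward(buf, i, 300) // field value
	i--
	buf[i] = 0x08 // example tag byte, written after (i.e. in front of) the value
	fmt.Printf("% x\n", buf[i:]) // 08 ac 02
}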
@ -628,14 +727,7 @@ func (m *ProcessDetails) Size() (n int) {
 }
 
 func sovRunhcs(x uint64) (n int) {
-	for {
-		n++
-		x >>= 7
-		if x == 0 {
-			break
-		}
-	}
-	return n
+	return (math_bits.Len64(x|1) + 6) / 7
 }
 func sozRunhcs(x uint64) (n int) {
 	return sovRunhcs(uint64((x << 1) ^ uint64((int64(x) >> 63))))
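The loop in the old sovRunhcs is replaced by a closed form: a varint carries 7 payload bits per byte, so the encoded size is ceil(bitlen(x|1)/7), which (math_bits.Len64(x|1)+6)/7 computes. A small self-check sketch comparing the two formulations:

package main

import (
	"fmt"
	"math/bits"
)

// loopSize is the old loop-based varint size computation.
func loopSize(x uint64) (n int) {
	for {
		n++
		x >>= 7
		if x == 0 {
			return n
		}
	}
}

// closedSize is the closed-form version used by the regenerated code.
func closedSize(x uint64) int { return (bits.Len64(x|1) + 6) / 7 }

func main() {
	for _, x := range []uint64{0, 1, 127, 128, 16383, 16384, 1 << 62} {
		fmt.Println(x, loopSize(x), closedSize(x), loopSize(x) == closedSize(x))
	}
}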
@ -644,6 +736,16 @@ func (this *Options) String() string {
 	if this == nil {
 		return "nil"
 	}
+	keysForDefaultContainerAnnotations := make([]string, 0, len(this.DefaultContainerAnnotations))
+	for k, _ := range this.DefaultContainerAnnotations {
+		keysForDefaultContainerAnnotations = append(keysForDefaultContainerAnnotations, k)
+	}
+	github_com_gogo_protobuf_sortkeys.Strings(keysForDefaultContainerAnnotations)
+	mapStringForDefaultContainerAnnotations := "map[string]string{"
+	for _, k := range keysForDefaultContainerAnnotations {
+		mapStringForDefaultContainerAnnotations += fmt.Sprintf("%v: %v,", k, this.DefaultContainerAnnotations[k])
+	}
+	mapStringForDefaultContainerAnnotations += "}"
 	s := strings.Join([]string{`&Options{`,
 		`Debug:` + fmt.Sprintf("%v", this.Debug) + `,`,
 		`DebugType:` + fmt.Sprintf("%v", this.DebugType) + `,`,
@ -662,6 +764,9 @@ func (this *Options) String() string {
 		`NCProxyAddr:` + fmt.Sprintf("%v", this.NCProxyAddr) + `,`,
 		`LogLevel:` + fmt.Sprintf("%v", this.LogLevel) + `,`,
 		`IoRetryTimeoutInSec:` + fmt.Sprintf("%v", this.IoRetryTimeoutInSec) + `,`,
+		`DefaultContainerAnnotations:` + mapStringForDefaultContainerAnnotations + `,`,
+		`NoInheritHostTimezone:` + fmt.Sprintf("%v", this.NoInheritHostTimezone) + `,`,
+		`ScrubLogs:` + fmt.Sprintf("%v", this.ScrubLogs) + `,`,
 		`XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`,
 		`}`,
 	}, "")
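The generated String() sorts the map keys before formatting so the output is deterministic across runs. The same pattern with only the standard library, as a sketch:

package main

import (
	"fmt"
	"sort"
	"strings"
)

// formatAnnotations renders a map[string]string deterministically, the way
// the generated Options.String formats DefaultContainerAnnotations.
func formatAnnotations(m map[string]string) string {
	keys := make([]string, 0, len(m))
	for k := range m {
		keys = append(keys, k)
	}
	sort.Strings(keys)

	var b strings.Builder
	b.WriteString("map[string]string{")
	for _, k := range keys {
		fmt.Fprintf(&b, "%v: %v,", k, m[k])
	}
	b.WriteString("}")
	return b.String()
}

func main() {
	fmt.Println(formatAnnotations(map[string]string{"b": "2", "a": "1"}))
}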
@ -673,7 +778,7 @@ func (this *ProcessDetails) String() string {
 	}
 	s := strings.Join([]string{`&ProcessDetails{`,
 		`ImageName:` + fmt.Sprintf("%v", this.ImageName) + `,`,
-		`CreatedAt:` + strings.Replace(strings.Replace(this.CreatedAt.String(), "Timestamp", "types.Timestamp", 1), `&`, ``, 1) + `,`,
+		`CreatedAt:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.CreatedAt), "Timestamp", "types.Timestamp", 1), `&`, ``, 1) + `,`,
 		`KernelTime_100Ns:` + fmt.Sprintf("%v", this.KernelTime_100Ns) + `,`,
 		`MemoryCommitBytes:` + fmt.Sprintf("%v", this.MemoryCommitBytes) + `,`,
 		`MemoryWorkingSetPrivateBytes:` + fmt.Sprintf("%v", this.MemoryWorkingSetPrivateBytes) + `,`,
@ -1140,16 +1245,180 @@ func (m *Options) Unmarshal(dAtA []byte) error {
|
|||||||
break
|
break
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
case 18:
|
||||||
|
if wireType != 2 {
|
||||||
|
return fmt.Errorf("proto: wrong wireType = %d for field DefaultContainerAnnotations", wireType)
|
||||||
|
}
|
||||||
|
var msglen int
|
||||||
|
for shift := uint(0); ; shift += 7 {
|
||||||
|
if shift >= 64 {
|
||||||
|
return ErrIntOverflowRunhcs
|
||||||
|
}
|
||||||
|
if iNdEx >= l {
|
||||||
|
return io.ErrUnexpectedEOF
|
||||||
|
}
|
||||||
|
b := dAtA[iNdEx]
|
||||||
|
iNdEx++
|
||||||
|
msglen |= int(b&0x7F) << shift
|
||||||
|
if b < 0x80 {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if msglen < 0 {
|
||||||
|
return ErrInvalidLengthRunhcs
|
||||||
|
}
|
||||||
|
postIndex := iNdEx + msglen
|
||||||
|
if postIndex < 0 {
|
||||||
|
return ErrInvalidLengthRunhcs
|
||||||
|
}
|
||||||
|
if postIndex > l {
|
||||||
|
return io.ErrUnexpectedEOF
|
||||||
|
}
|
||||||
|
if m.DefaultContainerAnnotations == nil {
|
||||||
|
m.DefaultContainerAnnotations = make(map[string]string)
|
||||||
|
}
|
||||||
|
var mapkey string
|
||||||
|
var mapvalue string
|
||||||
|
for iNdEx < postIndex {
|
||||||
|
entryPreIndex := iNdEx
|
||||||
|
var wire uint64
|
||||||
|
for shift := uint(0); ; shift += 7 {
|
||||||
|
if shift >= 64 {
|
||||||
|
return ErrIntOverflowRunhcs
|
||||||
|
}
|
||||||
|
if iNdEx >= l {
|
||||||
|
return io.ErrUnexpectedEOF
|
||||||
|
}
|
||||||
|
b := dAtA[iNdEx]
|
||||||
|
iNdEx++
|
||||||
|
wire |= uint64(b&0x7F) << shift
|
||||||
|
if b < 0x80 {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
fieldNum := int32(wire >> 3)
|
||||||
|
if fieldNum == 1 {
|
||||||
|
var stringLenmapkey uint64
|
||||||
|
for shift := uint(0); ; shift += 7 {
|
||||||
|
if shift >= 64 {
|
||||||
|
return ErrIntOverflowRunhcs
|
||||||
|
}
|
||||||
|
if iNdEx >= l {
|
||||||
|
return io.ErrUnexpectedEOF
|
||||||
|
}
|
||||||
|
b := dAtA[iNdEx]
|
||||||
|
iNdEx++
|
||||||
|
stringLenmapkey |= uint64(b&0x7F) << shift
|
||||||
|
if b < 0x80 {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
intStringLenmapkey := int(stringLenmapkey)
|
||||||
|
if intStringLenmapkey < 0 {
|
||||||
|
return ErrInvalidLengthRunhcs
|
||||||
|
}
|
||||||
|
postStringIndexmapkey := iNdEx + intStringLenmapkey
|
||||||
|
if postStringIndexmapkey < 0 {
|
||||||
|
return ErrInvalidLengthRunhcs
|
||||||
|
}
|
||||||
|
if postStringIndexmapkey > l {
|
||||||
|
return io.ErrUnexpectedEOF
|
||||||
|
}
|
||||||
|
mapkey = string(dAtA[iNdEx:postStringIndexmapkey])
|
||||||
|
iNdEx = postStringIndexmapkey
|
||||||
|
} else if fieldNum == 2 {
|
||||||
|
var stringLenmapvalue uint64
|
||||||
|
for shift := uint(0); ; shift += 7 {
|
||||||
|
if shift >= 64 {
|
||||||
|
return ErrIntOverflowRunhcs
|
||||||
|
}
|
||||||
|
if iNdEx >= l {
|
||||||
|
return io.ErrUnexpectedEOF
|
||||||
|
}
|
||||||
|
b := dAtA[iNdEx]
|
||||||
|
iNdEx++
|
||||||
|
stringLenmapvalue |= uint64(b&0x7F) << shift
|
||||||
|
if b < 0x80 {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
intStringLenmapvalue := int(stringLenmapvalue)
|
||||||
|
if intStringLenmapvalue < 0 {
|
||||||
|
return ErrInvalidLengthRunhcs
|
||||||
|
}
|
||||||
|
postStringIndexmapvalue := iNdEx + intStringLenmapvalue
|
||||||
|
if postStringIndexmapvalue < 0 {
|
||||||
|
return ErrInvalidLengthRunhcs
|
||||||
|
}
|
||||||
|
if postStringIndexmapvalue > l {
|
||||||
|
return io.ErrUnexpectedEOF
|
||||||
|
}
|
||||||
|
mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue])
|
||||||
|
iNdEx = postStringIndexmapvalue
|
||||||
|
} else {
|
||||||
|
iNdEx = entryPreIndex
|
||||||
|
skippy, err := skipRunhcs(dAtA[iNdEx:])
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
if (skippy < 0) || (iNdEx+skippy) < 0 {
|
||||||
|
return ErrInvalidLengthRunhcs
|
||||||
|
}
|
||||||
|
if (iNdEx + skippy) > postIndex {
|
||||||
|
return io.ErrUnexpectedEOF
|
||||||
|
}
|
||||||
|
iNdEx += skippy
|
||||||
|
}
|
||||||
|
}
|
||||||
|
m.DefaultContainerAnnotations[mapkey] = mapvalue
|
||||||
|
iNdEx = postIndex
|
||||||
|
case 19:
|
||||||
|
if wireType != 0 {
|
||||||
|
return fmt.Errorf("proto: wrong wireType = %d for field NoInheritHostTimezone", wireType)
|
||||||
|
}
|
||||||
|
var v int
|
||||||
|
for shift := uint(0); ; shift += 7 {
|
||||||
|
if shift >= 64 {
|
||||||
|
return ErrIntOverflowRunhcs
|
||||||
|
}
|
||||||
|
if iNdEx >= l {
|
||||||
|
return io.ErrUnexpectedEOF
|
||||||
|
}
|
||||||
|
b := dAtA[iNdEx]
|
||||||
|
iNdEx++
|
||||||
|
v |= int(b&0x7F) << shift
|
||||||
|
if b < 0x80 {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
m.NoInheritHostTimezone = bool(v != 0)
|
||||||
|
case 20:
|
||||||
|
if wireType != 0 {
|
||||||
|
return fmt.Errorf("proto: wrong wireType = %d for field ScrubLogs", wireType)
|
||||||
|
}
|
||||||
|
var v int
|
||||||
|
for shift := uint(0); ; shift += 7 {
|
||||||
|
if shift >= 64 {
|
||||||
|
return ErrIntOverflowRunhcs
|
||||||
|
}
|
||||||
|
if iNdEx >= l {
|
||||||
|
return io.ErrUnexpectedEOF
|
||||||
|
}
|
||||||
|
b := dAtA[iNdEx]
|
||||||
|
iNdEx++
|
||||||
|
v |= int(b&0x7F) << shift
|
||||||
|
if b < 0x80 {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
m.ScrubLogs = bool(v != 0)
|
||||||
 		default:
 			iNdEx = preIndex
 			skippy, err := skipRunhcs(dAtA[iNdEx:])
 			if err != nil {
 				return err
 			}
-			if skippy < 0 {
-				return ErrInvalidLengthRunhcs
-			}
-			if (iNdEx + skippy) < 0 {
+			if (skippy < 0) || (iNdEx+skippy) < 0 {
 				return ErrInvalidLengthRunhcs
 			}
 			if (iNdEx + skippy) > l {
@ -1411,10 +1680,7 @@ func (m *ProcessDetails) Unmarshal(dAtA []byte) error {
 			if err != nil {
 				return err
 			}
-			if skippy < 0 {
-				return ErrInvalidLengthRunhcs
-			}
-			if (iNdEx + skippy) < 0 {
+			if (skippy < 0) || (iNdEx+skippy) < 0 {
 				return ErrInvalidLengthRunhcs
 			}
 			if (iNdEx + skippy) > l {
@ -1433,6 +1699,7 @@ func (m *ProcessDetails) Unmarshal(dAtA []byte) error {
|
|||||||
func skipRunhcs(dAtA []byte) (n int, err error) {
|
func skipRunhcs(dAtA []byte) (n int, err error) {
|
||||||
l := len(dAtA)
|
l := len(dAtA)
|
||||||
iNdEx := 0
|
iNdEx := 0
|
||||||
|
depth := 0
|
||||||
for iNdEx < l {
|
for iNdEx < l {
|
||||||
var wire uint64
|
var wire uint64
|
||||||
for shift := uint(0); ; shift += 7 {
|
for shift := uint(0); ; shift += 7 {
|
||||||
@ -1464,10 +1731,8 @@ func skipRunhcs(dAtA []byte) (n int, err error) {
|
|||||||
break
|
break
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
return iNdEx, nil
|
|
||||||
case 1:
|
case 1:
|
||||||
iNdEx += 8
|
iNdEx += 8
|
||||||
return iNdEx, nil
|
|
||||||
case 2:
|
case 2:
|
||||||
var length int
|
var length int
|
||||||
for shift := uint(0); ; shift += 7 {
|
for shift := uint(0); ; shift += 7 {
|
||||||
@ -1488,55 +1753,30 @@ func skipRunhcs(dAtA []byte) (n int, err error) {
|
|||||||
return 0, ErrInvalidLengthRunhcs
|
return 0, ErrInvalidLengthRunhcs
|
||||||
}
|
}
|
||||||
iNdEx += length
|
iNdEx += length
|
||||||
if iNdEx < 0 {
|
|
||||||
return 0, ErrInvalidLengthRunhcs
|
|
||||||
}
|
|
||||||
return iNdEx, nil
|
|
||||||
case 3:
|
case 3:
|
||||||
for {
|
depth++
|
||||||
var innerWire uint64
|
|
||||||
var start int = iNdEx
|
|
||||||
for shift := uint(0); ; shift += 7 {
|
|
||||||
if shift >= 64 {
|
|
||||||
return 0, ErrIntOverflowRunhcs
|
|
||||||
}
|
|
||||||
if iNdEx >= l {
|
|
||||||
return 0, io.ErrUnexpectedEOF
|
|
||||||
}
|
|
||||||
b := dAtA[iNdEx]
|
|
||||||
iNdEx++
|
|
||||||
innerWire |= (uint64(b) & 0x7F) << shift
|
|
||||||
if b < 0x80 {
|
|
||||||
break
|
|
||||||
}
|
|
||||||
}
|
|
||||||
innerWireType := int(innerWire & 0x7)
|
|
||||||
if innerWireType == 4 {
|
|
||||||
break
|
|
||||||
}
|
|
||||||
next, err := skipRunhcs(dAtA[start:])
|
|
||||||
if err != nil {
|
|
||||||
return 0, err
|
|
||||||
}
|
|
||||||
iNdEx = start + next
|
|
||||||
if iNdEx < 0 {
|
|
||||||
return 0, ErrInvalidLengthRunhcs
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return iNdEx, nil
|
|
||||||
case 4:
|
case 4:
|
||||||
return iNdEx, nil
|
if depth == 0 {
|
||||||
|
return 0, ErrUnexpectedEndOfGroupRunhcs
|
||||||
|
}
|
||||||
|
depth--
|
||||||
case 5:
|
case 5:
|
||||||
iNdEx += 4
|
iNdEx += 4
|
||||||
return iNdEx, nil
|
|
||||||
default:
|
default:
|
||||||
return 0, fmt.Errorf("proto: illegal wireType %d", wireType)
|
return 0, fmt.Errorf("proto: illegal wireType %d", wireType)
|
||||||
}
|
}
|
||||||
|
if iNdEx < 0 {
|
||||||
|
return 0, ErrInvalidLengthRunhcs
|
||||||
}
|
}
|
||||||
panic("unreachable")
|
if depth == 0 {
|
||||||
|
return iNdEx, nil
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return 0, io.ErrUnexpectedEOF
|
||||||
}
|
}
|
||||||

 var (
 	ErrInvalidLengthRunhcs = fmt.Errorf("proto: negative length found during unmarshaling")
 	ErrIntOverflowRunhcs   = fmt.Errorf("proto: integer overflow")
+	ErrUnexpectedEndOfGroupRunhcs = fmt.Errorf("proto: unexpected end of group")
 )
10 vendor/github.com/Microsoft/hcsshim/cmd/containerd-shim-runhcs-v1/options/runhcs.proto generated vendored
@ -98,6 +98,16 @@ message Options {
 	// The typical example is if Containerd has restarted but is expected to come back online. A 0 for this field is interpreted as an infinite
 	// timeout.
 	int32 io_retry_timeout_in_sec = 17;
+
+	// default_container_annotations specifies a set of annotations that should be set for every workload container
+	map<string, string> default_container_annotations = 18;
+
+	// no_inherit_host_timezone specifies to skip inheriting the hosts time zone for WCOW UVMs and instead default to
+	// UTC.
+	bool no_inherit_host_timezone = 19;
+
+	// scrub_logs enables removing environment variables and other potentially sensitive information from logs
+	bool scrub_logs = 20;
 }
 
 // ProcessDetails contains additional information about a process. This is the additional
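On the Go side these three options surface as fields of the shim's Options message (the field names appear in the regenerated code above). A hedged sketch of populating them; the import path is assumed from the proto file's location in the vendor tree:

package main

import (
	"fmt"

	// Assumed import path, derived from the .proto location above.
	"github.com/Microsoft/hcsshim/cmd/containerd-shim-runhcs-v1/options"
)

func main() {
	opts := &options.Options{
		// Annotations applied to every workload container in the sandbox.
		DefaultContainerAnnotations: map[string]string{
			"example.annotation/key": "value", // hypothetical annotation, for illustration only
		},
		// Default WCOW UVMs to UTC instead of inheriting the host time zone.
		NoInheritHostTimezone: true,
		// Strip environment variables and other potentially sensitive data from logs.
		ScrubLogs: true,
	}
	fmt.Println(opts.String())
}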
765
vendor/github.com/Microsoft/hcsshim/cmd/containerd-shim-runhcs-v1/stats/stats.pb.go
generated
vendored
765
vendor/github.com/Microsoft/hcsshim/cmd/containerd-shim-runhcs-v1/stats/stats.pb.go
generated
vendored
File diff suppressed because it is too large
Load Diff
6
vendor/github.com/Microsoft/hcsshim/computestorage/attach.go
generated
vendored
6
vendor/github.com/Microsoft/hcsshim/computestorage/attach.go
generated
vendored
@ -1,3 +1,5 @@
|
|||||||
|
//go:build windows
|
||||||
|
|
||||||
package computestorage
|
package computestorage
|
||||||
|
|
||||||
import (
|
import (
|
||||||
@ -17,8 +19,8 @@ import (
|
|||||||
//
|
//
|
||||||
// `layerData` is the parent read-only layer data.
|
// `layerData` is the parent read-only layer data.
|
||||||
func AttachLayerStorageFilter(ctx context.Context, layerPath string, layerData LayerData) (err error) {
|
func AttachLayerStorageFilter(ctx context.Context, layerPath string, layerData LayerData) (err error) {
|
||||||
title := "hcsshim.AttachLayerStorageFilter"
|
title := "hcsshim::AttachLayerStorageFilter"
|
||||||
ctx, span := trace.StartSpan(ctx, title) //nolint:ineffassign,staticcheck
|
ctx, span := oc.StartSpan(ctx, title) //nolint:ineffassign,staticcheck
|
||||||
defer span.End()
|
defer span.End()
|
||||||
defer func() { oc.SetSpanStatus(span, err) }()
|
defer func() { oc.SetSpanStatus(span, err) }()
|
||||||
span.AddAttributes(
|
span.AddAttributes(
|
||||||
|
6 vendor/github.com/Microsoft/hcsshim/computestorage/destroy.go generated vendored
@ -1,3 +1,5 @@
+//go:build windows
+
 package computestorage
 
 import (
@ -12,8 +14,8 @@ import (
 //
 // `layerPath` is a path to a directory containing the layer to export.
 func DestroyLayer(ctx context.Context, layerPath string) (err error) {
-	title := "hcsshim.DestroyLayer"
-	ctx, span := trace.StartSpan(ctx, title) //nolint:ineffassign,staticcheck
+	title := "hcsshim::DestroyLayer"
+	ctx, span := oc.StartSpan(ctx, title) //nolint:ineffassign,staticcheck
 	defer span.End()
 	defer func() { oc.SetSpanStatus(span, err) }()
 	span.AddAttributes(trace.StringAttribute("layerPath", layerPath))
6 vendor/github.com/Microsoft/hcsshim/computestorage/detach.go generated vendored
@ -1,3 +1,5 @@
+//go:build windows
+
 package computestorage
 
 import (
@ -12,8 +14,8 @@ import (
 //
 // `layerPath` is a path to a directory containing the layer to export.
 func DetachLayerStorageFilter(ctx context.Context, layerPath string) (err error) {
-	title := "hcsshim.DetachLayerStorageFilter"
-	ctx, span := trace.StartSpan(ctx, title) //nolint:ineffassign,staticcheck
+	title := "hcsshim::DetachLayerStorageFilter"
+	ctx, span := oc.StartSpan(ctx, title) //nolint:ineffassign,staticcheck
 	defer span.End()
 	defer func() { oc.SetSpanStatus(span, err) }()
 	span.AddAttributes(trace.StringAttribute("layerPath", layerPath))
12 vendor/github.com/Microsoft/hcsshim/computestorage/export.go generated vendored
@ -1,3 +1,5 @@
+//go:build windows
+
 package computestorage
 
 import (
@ -19,8 +21,8 @@ import (
 //
 // `options` are the export options applied to the exported layer.
 func ExportLayer(ctx context.Context, layerPath, exportFolderPath string, layerData LayerData, options ExportLayerOptions) (err error) {
-	title := "hcsshim.ExportLayer"
-	ctx, span := trace.StartSpan(ctx, title) //nolint:ineffassign,staticcheck
+	title := "hcsshim::ExportLayer"
+	ctx, span := oc.StartSpan(ctx, title) //nolint:ineffassign,staticcheck
 	defer span.End()
 	defer func() { oc.SetSpanStatus(span, err) }()
 	span.AddAttributes(
@ -28,17 +30,17 @@ func ExportLayer(ctx context.Context, layerPath, exportFolderPath string, layerD
 		trace.StringAttribute("exportFolderPath", exportFolderPath),
 	)
 
-	ldbytes, err := json.Marshal(layerData)
+	ldBytes, err := json.Marshal(layerData)
 	if err != nil {
 		return err
 	}
 
-	obytes, err := json.Marshal(options)
+	oBytes, err := json.Marshal(options)
 	if err != nil {
 		return err
 	}
 
-	err = hcsExportLayer(layerPath, exportFolderPath, string(ldbytes), string(obytes))
+	err = hcsExportLayer(layerPath, exportFolderPath, string(ldBytes), string(oBytes))
 	if err != nil {
 		return errors.Wrap(err, "failed to export layer")
 	}
12 vendor/github.com/Microsoft/hcsshim/computestorage/format.go generated vendored
@ -1,3 +1,5 @@
+//go:build windows
+
 package computestorage
 
 import (
@ -5,16 +7,20 @@ import (
 
 	"github.com/Microsoft/hcsshim/internal/oc"
 	"github.com/pkg/errors"
-	"go.opencensus.io/trace"
 	"golang.org/x/sys/windows"
 )
 
 // FormatWritableLayerVhd formats a virtual disk for use as a writable container layer.
 //
 // If the VHD is not mounted it will be temporarily mounted.
+//
+// NOTE: This API had a breaking change in the operating system after Windows Server 2019.
+// On ws2019 the API expects to get passed a file handle from CreateFile for the vhd that
+// the caller wants to format. On > ws2019, its expected that the caller passes a vhd handle
+// that can be obtained from the virtdisk APIs.
 func FormatWritableLayerVhd(ctx context.Context, vhdHandle windows.Handle) (err error) {
-	title := "hcsshim.FormatWritableLayerVhd"
-	ctx, span := trace.StartSpan(ctx, title) //nolint:ineffassign,staticcheck
+	title := "hcsshim::FormatWritableLayerVhd"
+	ctx, span := oc.StartSpan(ctx, title) //nolint:ineffassign,staticcheck
 	defer span.End()
 	defer func() { oc.SetSpanStatus(span, err) }()
 
14 vendor/github.com/Microsoft/hcsshim/computestorage/helpers.go generated vendored
@ -1,3 +1,5 @@
+//go:build windows
+
 package computestorage
 
 import (
@ -6,10 +8,12 @@ import (
 	"path/filepath"
 	"syscall"
 
-	"github.com/Microsoft/go-winio/pkg/security"
 	"github.com/Microsoft/go-winio/vhd"
+	"github.com/Microsoft/hcsshim/internal/memory"
 	"github.com/pkg/errors"
 	"golang.org/x/sys/windows"
+
+	"github.com/Microsoft/hcsshim/internal/security"
 )
 
 const defaultVHDXBlockSizeInMB = 1
@ -59,8 +63,8 @@ func SetupContainerBaseLayer(ctx context.Context, layerPath, baseVhdPath, diffVh
 	createParams := &vhd.CreateVirtualDiskParameters{
 		Version: 2,
 		Version2: vhd.CreateVersion2{
-			MaximumSize:      sizeInGB * 1024 * 1024 * 1024,
-			BlockSizeInBytes: defaultVHDXBlockSizeInMB * 1024 * 1024,
+			MaximumSize:      sizeInGB * memory.GiB,
+			BlockSizeInBytes: defaultVHDXBlockSizeInMB * memory.MiB,
 		},
 	}
 	handle, err := vhd.CreateVirtualDisk(baseVhdPath, vhd.VirtualDiskAccessNone, vhd.CreateVirtualDiskFlagNone, createParams)
@ -135,8 +139,8 @@ func SetupUtilityVMBaseLayer(ctx context.Context, uvmPath, baseVhdPath, diffVhdP
 	createParams := &vhd.CreateVirtualDiskParameters{
 		Version: 2,
 		Version2: vhd.CreateVersion2{
-			MaximumSize:      sizeInGB * 1024 * 1024 * 1024,
-			BlockSizeInBytes: defaultVHDXBlockSizeInMB * 1024 * 1024,
+			MaximumSize:      sizeInGB * memory.GiB,
+			BlockSizeInBytes: defaultVHDXBlockSizeInMB * memory.MiB,
 		},
 	}
 	handle, err := vhd.CreateVirtualDisk(baseVhdPath, vhd.VirtualDiskAccessNone, vhd.CreateVirtualDiskFlagNone, createParams)
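The VHD sizing now uses named byte-size constants from internal/memory instead of chained 1024 multiplications. A tiny sketch of the idea, assuming the constants are the usual powers of 1024:

package main

import "fmt"

// Byte-size constants in the spirit of hcsshim's internal/memory package
// (assumed here to be plain powers of 1024).
const (
	KiB uint64 = 1024
	MiB        = 1024 * KiB
	GiB        = 1024 * MiB
)

func main() {
	sizeInGB := uint64(20)
	maximumSize := sizeInGB * GiB // replaces sizeInGB * 1024 * 1024 * 1024
	blockSize := uint64(1) * MiB  // replaces defaultVHDXBlockSizeInMB * 1024 * 1024
	fmt.Println(maximumSize, blockSize)
}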
6 vendor/github.com/Microsoft/hcsshim/computestorage/import.go generated vendored
@ -1,3 +1,5 @@
+//go:build windows
+
 package computestorage
 
 import (
@ -19,8 +21,8 @@ import (
 //
 // `layerData` is the parent layer data.
 func ImportLayer(ctx context.Context, layerPath, sourceFolderPath string, layerData LayerData) (err error) {
-	title := "hcsshim.ImportLayer"
-	ctx, span := trace.StartSpan(ctx, title) //nolint:ineffassign,staticcheck
+	title := "hcsshim::ImportLayer"
+	ctx, span := oc.StartSpan(ctx, title) //nolint:ineffassign,staticcheck
 	defer span.End()
 	defer func() { oc.SetSpanStatus(span, err) }()
 	span.AddAttributes(
6 vendor/github.com/Microsoft/hcsshim/computestorage/initialize.go generated vendored
@ -1,3 +1,5 @@
+//go:build windows
+
 package computestorage
 
 import (
@ -16,8 +18,8 @@ import (
 //
 // `layerData` is the parent read-only layer data.
 func InitializeWritableLayer(ctx context.Context, layerPath string, layerData LayerData) (err error) {
-	title := "hcsshim.InitializeWritableLayer"
-	ctx, span := trace.StartSpan(ctx, title) //nolint:ineffassign,staticcheck
+	title := "hcsshim::InitializeWritableLayer"
+	ctx, span := oc.StartSpan(ctx, title) //nolint:ineffassign,staticcheck
 	defer span.End()
 	defer func() { oc.SetSpanStatus(span, err) }()
 	span.AddAttributes(
7 vendor/github.com/Microsoft/hcsshim/computestorage/mount.go generated vendored
@ -1,3 +1,5 @@
+//go:build windows
+
 package computestorage
 
 import (
@ -6,14 +8,13 @@ import (
 	"github.com/Microsoft/hcsshim/internal/interop"
 	"github.com/Microsoft/hcsshim/internal/oc"
 	"github.com/pkg/errors"
-	"go.opencensus.io/trace"
 	"golang.org/x/sys/windows"
 )
 
 // GetLayerVhdMountPath returns the volume path for a virtual disk of a writable container layer.
 func GetLayerVhdMountPath(ctx context.Context, vhdHandle windows.Handle) (path string, err error) {
-	title := "hcsshim.GetLayerVhdMountPath"
-	ctx, span := trace.StartSpan(ctx, title) //nolint:ineffassign,staticcheck
+	title := "hcsshim::GetLayerVhdMountPath"
+	ctx, span := oc.StartSpan(ctx, title) //nolint:ineffassign,staticcheck
 	defer span.End()
 	defer func() { oc.SetSpanStatus(span, err) }()
 
14 vendor/github.com/Microsoft/hcsshim/computestorage/setup.go generated vendored
@ -1,3 +1,5 @@
+//go:build windows
+
 package computestorage
 
 import (
@ -21,8 +23,8 @@ import (
 //
 // `options` are the options applied while processing the layer.
 func SetupBaseOSLayer(ctx context.Context, layerPath string, vhdHandle windows.Handle, options OsLayerOptions) (err error) {
-	title := "hcsshim.SetupBaseOSLayer"
-	ctx, span := trace.StartSpan(ctx, title) //nolint:ineffassign,staticcheck
+	title := "hcsshim::SetupBaseOSLayer"
+	ctx, span := oc.StartSpan(ctx, title) //nolint:ineffassign,staticcheck
 	defer span.End()
 	defer func() { oc.SetSpanStatus(span, err) }()
 	span.AddAttributes(
@ -48,12 +50,16 @@ func SetupBaseOSLayer(ctx context.Context, layerPath string, vhdHandle windows.H
 // `volumePath` is the path to the volume to be used for setup.
 //
 // `options` are the options applied while processing the layer.
+//
+// NOTE: This API is only available on builds of Windows greater than 19645. Inside we
+// check if the hosts build has the API available by using 'GetVersion' which requires
+// the calling application to be manifested. https://docs.microsoft.com/en-us/windows/win32/sbscs/manifests
 func SetupBaseOSVolume(ctx context.Context, layerPath, volumePath string, options OsLayerOptions) (err error) {
 	if osversion.Build() < 19645 {
 		return errors.New("SetupBaseOSVolume is not present on builds older than 19645")
 	}
-	title := "hcsshim.SetupBaseOSVolume"
-	ctx, span := trace.StartSpan(ctx, title) //nolint:ineffassign,staticcheck
+	title := "hcsshim::SetupBaseOSVolume"
+	ctx, span := oc.StartSpan(ctx, title) //nolint:ineffassign,staticcheck
 	defer span.End()
 	defer func() { oc.SetSpanStatus(span, err) }()
 	span.AddAttributes(
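SetupBaseOSVolume is gated on the host build number because the underlying HCS call only exists on newer Windows builds. A sketch of that guard pattern using the osversion package referenced in the diff:

//go:build windows

// Sketch of the guard pattern SetupBaseOSVolume uses: refuse to call an HCS
// API when the host build is too old to have it.
package main

import (
	"errors"
	"fmt"

	"github.com/Microsoft/hcsshim/osversion"
)

const minimumBuildForVolumeSetup = 19645 // build that introduced the volume-setup API

func setupVolumeGate() error {
	if osversion.Build() < minimumBuildForVolumeSetup {
		return errors.New("SetupBaseOSVolume is not present on builds older than 19645")
	}
	// ...safe to call the HCS volume-setup API here...
	return nil
}

func main() {
	fmt.Println(setupVolumeGate())
}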
7 vendor/github.com/Microsoft/hcsshim/computestorage/storage.go generated vendored
@ -20,10 +20,13 @@ import (
 //sys hcsGetLayerVhdMountPath(vhdHandle windows.Handle, mountPath **uint16) (hr error) = computestorage.HcsGetLayerVhdMountPath?
 //sys hcsSetupBaseOSVolume(layerPath string, volumePath string, options string) (hr error) = computestorage.HcsSetupBaseOSVolume?
 
+type Version = hcsschema.Version
+type Layer = hcsschema.Layer
+
 // LayerData is the data used to describe parent layer information.
 type LayerData struct {
-	SchemaVersion hcsschema.Version `json:"SchemaVersion,omitempty"`
-	Layers        []hcsschema.Layer `json:"Layers,omitempty"`
+	SchemaVersion Version `json:"SchemaVersion,omitempty"`
+	Layers        []Layer `json:"Layers,omitempty"`
 }
 
 // ExportLayerOptions are the set of options that are used with the `computestorage.HcsExportLayer` syscall.
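With Version and Layer re-exported, callers can describe parent layers without importing the internal schema package. A hedged usage sketch (Windows-only; the schema struct field names are assumptions):

//go:build windows

package main

import (
	"encoding/json"
	"fmt"

	"github.com/Microsoft/hcsshim/computestorage"
)

func main() {
	// Parent layer description built from computestorage's re-exported aliases.
	ld := computestorage.LayerData{
		SchemaVersion: computestorage.Version{Major: 2, Minor: 1}, // assumed schema field names
		Layers: []computestorage.Layer{
			{Id: "base-layer-id", Path: `C:\layers\base`}, // hypothetical ID and path
		},
	}
	b, _ := json.Marshal(ld)
	fmt.Println(string(b))
}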
4 vendor/github.com/Microsoft/hcsshim/container.go generated vendored
@ -1,3 +1,5 @@
+//go:build windows
+
 package hcsshim
 
 import (
@ -60,7 +62,7 @@ type container struct {
 	waitCh chan struct{}
 }
 
-// createComputeSystemAdditionalJSON is read from the environment at initialisation
+// createContainerAdditionalJSON is read from the environment at initialization
 // time. It allows an environment variable to define additional JSON which
 // is merged in the CreateComputeSystem call to HCS.
 var createContainerAdditionalJSON []byte
5 vendor/github.com/Microsoft/hcsshim/errors.go generated vendored
@ -1,3 +1,5 @@
+//go:build windows
+
 package hcsshim
 
 import (
@ -50,6 +52,9 @@ var (
 	// ErrUnexpectedValue is an error encountered when hcs returns an invalid value
 	ErrUnexpectedValue = hcs.ErrUnexpectedValue
 
+	// ErrOperationDenied is an error when hcs attempts an operation that is explicitly denied
+	ErrOperationDenied = hcs.ErrOperationDenied
+
 	// ErrVmcomputeAlreadyStopped is an error encountered when a shutdown or terminate request is made on a stopped container
 	ErrVmcomputeAlreadyStopped = hcs.ErrVmcomputeAlreadyStopped
 
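ErrOperationDenied joins the other internal/hcs sentinels re-exported at the top level. A sketch of checking for it, assuming callers may receive it wrapped:

//go:build windows

package hcsshimcheck

import (
	"errors"

	"github.com/Microsoft/hcsshim"
)

// isOperationDenied reports whether err is, or wraps, the re-exported sentinel.
// errors.Is also covers the plain err == hcsshim.ErrOperationDenied case.
func isOperationDenied(err error) bool {
	return errors.Is(err, hcsshim.ErrOperationDenied)
}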
94 vendor/github.com/Microsoft/hcsshim/ext4/dmverity/dmverity.go generated vendored
@ -1,6 +1,7 @@
|
|||||||
package dmverity
|
package dmverity
|
||||||
|
|
||||||
import (
|
import (
|
||||||
|
"bufio"
|
||||||
"bytes"
|
"bytes"
|
||||||
"crypto/rand"
|
"crypto/rand"
|
||||||
"crypto/sha256"
|
"crypto/sha256"
|
||||||
@ -12,19 +13,29 @@ import (
|
|||||||
"github.com/pkg/errors"
|
"github.com/pkg/errors"
|
||||||
|
|
||||||
"github.com/Microsoft/hcsshim/ext4/internal/compactext4"
|
"github.com/Microsoft/hcsshim/ext4/internal/compactext4"
|
||||||
|
"github.com/Microsoft/hcsshim/internal/memory"
|
||||||
)
|
)
|
||||||
|
|
||||||
const (
|
const (
|
||||||
blockSize = compactext4.BlockSize
|
blockSize = compactext4.BlockSize
|
||||||
|
// MerkleTreeBufioSize is a default buffer size to use with bufio.Reader
|
||||||
|
MerkleTreeBufioSize = memory.MiB // 1MB
|
||||||
// RecommendedVHDSizeGB is the recommended size in GB for VHDs, which is not a hard limit.
|
// RecommendedVHDSizeGB is the recommended size in GB for VHDs, which is not a hard limit.
|
||||||
RecommendedVHDSizeGB = 128 * 1024 * 1024 * 1024
|
RecommendedVHDSizeGB = 128 * memory.GiB
|
||||||
|
// VeritySignature is a value written to dm-verity super-block.
|
||||||
|
VeritySignature = "verity"
|
||||||
|
)
|
||||||
|
|
||||||
|
var (
|
||||||
|
salt = bytes.Repeat([]byte{0}, 32)
|
||||||
|
sbSize = binary.Size(dmveritySuperblock{})
|
||||||
)
|
)
|
||||||
var salt = bytes.Repeat([]byte{0}, 32)
|
|
||||||
|
|
||||||
var (
|
var (
|
||||||
ErrSuperBlockReadFailure = errors.New("failed to read dm-verity super block")
|
ErrSuperBlockReadFailure = errors.New("failed to read dm-verity super block")
|
||||||
ErrSuperBlockParseFailure = errors.New("failed to parse dm-verity super block")
|
ErrSuperBlockParseFailure = errors.New("failed to parse dm-verity super block")
|
||||||
ErrRootHashReadFailure = errors.New("failed to read dm-verity root hash")
|
ErrRootHashReadFailure = errors.New("failed to read dm-verity root hash")
|
||||||
|
ErrNotVeritySuperBlock = errors.New("invalid dm-verity super-block signature")
|
||||||
)
|
)
|
||||||
|
|
||||||
type dmveritySuperblock struct {
|
type dmveritySuperblock struct {
|
||||||
@ -69,20 +80,19 @@ type VerityInfo struct {
|
|||||||
Version uint32
|
Version uint32
|
||||||
}
|
}
|
||||||
|
|
||||||
// MerkleTree constructs dm-verity hash-tree for a given byte array with a fixed salt (0-byte) and algorithm (sha256).
|
// MerkleTree constructs dm-verity hash-tree for a given io.Reader with a fixed salt (0-byte) and algorithm (sha256).
|
||||||
func MerkleTree(data []byte) ([]byte, error) {
|
func MerkleTree(r io.Reader) ([]byte, error) {
|
||||||
layers := make([][]byte, 0)
|
layers := make([][]byte, 0)
|
||||||
|
currentLevel := r
|
||||||
|
|
||||||
currentLevel := bytes.NewBuffer(data)
|
for {
|
||||||
|
|
||||||
for currentLevel.Len() != blockSize {
|
|
||||||
blocks := currentLevel.Len() / blockSize
|
|
||||||
nextLevel := bytes.NewBuffer(make([]byte, 0))
|
nextLevel := bytes.NewBuffer(make([]byte, 0))
|
||||||
|
for {
|
||||||
for i := 0; i < blocks; i++ {
|
|
||||||
block := make([]byte, blockSize)
|
block := make([]byte, blockSize)
|
||||||
_, err := currentLevel.Read(block)
|
if _, err := io.ReadFull(currentLevel, block); err != nil {
|
||||||
if err != nil {
|
if err == io.EOF {
|
||||||
|
break
|
||||||
|
}
|
||||||
return nil, errors.Wrap(err, "failed to read data block")
|
return nil, errors.Wrap(err, "failed to read data block")
|
||||||
}
|
}
|
||||||
h := hash2(salt, block)
|
h := hash2(salt, block)
|
||||||
@ -92,14 +102,18 @@ func MerkleTree(data []byte) ([]byte, error) {
|
|||||||
padding := bytes.Repeat([]byte{0}, blockSize-(nextLevel.Len()%blockSize))
|
padding := bytes.Repeat([]byte{0}, blockSize-(nextLevel.Len()%blockSize))
|
||||||
nextLevel.Write(padding)
|
nextLevel.Write(padding)
|
||||||
|
|
||||||
currentLevel = nextLevel
|
layers = append(layers, nextLevel.Bytes())
|
||||||
layers = append(layers, currentLevel.Bytes())
|
currentLevel = bufio.NewReaderSize(nextLevel, MerkleTreeBufioSize)
|
||||||
|
|
||||||
|
// This means that only root hash remains and our job is done
|
||||||
|
if nextLevel.Len() == blockSize {
|
||||||
|
break
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
var tree = bytes.NewBuffer(make([]byte, 0))
|
tree := bytes.NewBuffer(make([]byte, 0))
|
||||||
for i := len(layers) - 1; i >= 0; i-- {
|
for i := len(layers) - 1; i >= 0; i-- {
|
||||||
_, err := tree.Write(layers[i])
|
if _, err := tree.Write(layers[i]); err != nil {
|
||||||
if err != nil {
|
|
||||||
return nil, errors.Wrap(err, "failed to write merkle tree")
|
return nil, errors.Wrap(err, "failed to write merkle tree")
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@ -125,7 +139,7 @@ func NewDMVeritySuperblock(size uint64) *dmveritySuperblock {
|
|||||||
SaltSize: uint16(len(salt)),
|
SaltSize: uint16(len(salt)),
|
||||||
}
|
}
|
||||||
|
|
||||||
copy(superblock.Signature[:], "verity")
|
copy(superblock.Signature[:], VeritySignature)
|
||||||
copy(superblock.Algorithm[:], "sha256")
|
copy(superblock.Algorithm[:], "sha256")
|
||||||
copy(superblock.Salt[:], salt)
|
copy(superblock.Salt[:], salt)
|
||||||
|
|
||||||
@ -165,7 +179,7 @@ func ReadDMVerityInfo(vhdPath string, offsetInBytes int64) (*VerityInfo, error)
|
|||||||
block := make([]byte, blockSize)
|
block := make([]byte, blockSize)
|
||||||
if s, err := vhd.Read(block); err != nil || s != blockSize {
|
if s, err := vhd.Read(block); err != nil || s != blockSize {
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, errors.Wrapf(ErrSuperBlockReadFailure, "%s", err)
|
return nil, errors.Wrapf(err, "%s", ErrSuperBlockReadFailure)
|
||||||
}
|
}
|
||||||
return nil, errors.Wrapf(ErrSuperBlockReadFailure, "unexpected bytes read: expected=%d, actual=%d", blockSize, s)
|
return nil, errors.Wrapf(ErrSuperBlockReadFailure, "unexpected bytes read: expected=%d, actual=%d", blockSize, s)
|
||||||
}
|
}
|
||||||
@ -173,13 +187,15 @@ func ReadDMVerityInfo(vhdPath string, offsetInBytes int64) (*VerityInfo, error)
|
|||||||
dmvSB := &dmveritySuperblock{}
|
dmvSB := &dmveritySuperblock{}
|
||||||
b := bytes.NewBuffer(block)
|
b := bytes.NewBuffer(block)
|
||||||
if err := binary.Read(b, binary.LittleEndian, dmvSB); err != nil {
|
if err := binary.Read(b, binary.LittleEndian, dmvSB); err != nil {
|
||||||
return nil, errors.Wrapf(ErrSuperBlockParseFailure, "%s", err)
|
return nil, errors.Wrapf(err, "%s", ErrSuperBlockParseFailure)
|
||||||
|
}
|
||||||
|
if string(bytes.Trim(dmvSB.Signature[:], "\x00")[:]) != VeritySignature {
|
||||||
|
return nil, ErrNotVeritySuperBlock
|
||||||
}
|
}
|
||||||
|
|
||||||
// read the merkle tree root
|
// read the merkle tree root
|
||||||
if s, err := vhd.Read(block); err != nil || s != blockSize {
|
if s, err := vhd.Read(block); err != nil || s != blockSize {
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, errors.Wrapf(ErrRootHashReadFailure, "%s", err)
|
return nil, errors.Wrapf(err, "%s", ErrRootHashReadFailure)
|
||||||
}
|
}
|
||||||
return nil, errors.Wrapf(ErrRootHashReadFailure, "unexpected bytes read: expected=%d, actual=%d", blockSize, s)
|
return nil, errors.Wrapf(ErrRootHashReadFailure, "unexpected bytes read: expected=%d, actual=%d", blockSize, s)
|
||||||
}
|
}
|
||||||
@ -196,3 +212,37 @@ func ReadDMVerityInfo(vhdPath string, offsetInBytes int64) (*VerityInfo, error)
|
|||||||
Version: dmvSB.Version,
|
Version: dmvSB.Version,
|
||||||
}, nil
|
}, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// ComputeAndWriteHashDevice builds merkle tree from a given io.ReadSeeker and writes the result
|
||||||
|
// hash device (dm-verity super-block combined with merkle tree) to io.WriteSeeker.
|
||||||
|
func ComputeAndWriteHashDevice(r io.ReadSeeker, w io.WriteSeeker) error {
|
||||||
|
if _, err := r.Seek(0, io.SeekStart); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
tree, err := MerkleTree(r)
|
||||||
|
if err != nil {
|
||||||
|
return errors.Wrap(err, "failed to build merkle tree")
|
||||||
|
}
|
||||||
|
|
||||||
|
devSize, err := r.Seek(0, io.SeekEnd)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
dmVeritySB := NewDMVeritySuperblock(uint64(devSize))
|
||||||
|
if _, err := w.Seek(0, io.SeekEnd); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
if err := binary.Write(w, binary.LittleEndian, dmVeritySB); err != nil {
|
||||||
|
return errors.Wrap(err, "failed to write dm-verity super-block")
|
||||||
|
}
|
||||||
|
// write super-block padding
|
||||||
|
padding := bytes.Repeat([]byte{0}, blockSize-(sbSize%blockSize))
|
||||||
|
if _, err = w.Write(padding); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
// write tree
|
||||||
|
if _, err := w.Write(tree); err != nil {
|
||||||
|
return errors.Wrap(err, "failed to write merkle tree")
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
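In this version MerkleTree consumes an io.Reader (buffered in 1 MiB chunks) instead of a byte slice, and the new ComputeAndWriteHashDevice appends the dm-verity super-block and merkle tree to a writer. A hedged usage sketch; the file name is illustrative:

package main

import (
	"log"
	"os"

	"github.com/Microsoft/hcsshim/ext4/dmverity"
)

func main() {
	// Open an existing ext4 image read-write; the path is illustrative.
	f, err := os.OpenFile("rootfs.ext4.vhd", os.O_RDWR, 0)
	if err != nil {
		log.Fatal(err)
	}
	defer f.Close()

	// Hash the image contents and append the dm-verity super-block plus
	// merkle tree to the end of the same file.
	if err := dmverity.ComputeAndWriteHashDevice(f, f); err != nil {
		log.Fatal(err)
	}
}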
36 vendor/github.com/Microsoft/hcsshim/ext4/internal/compactext4/compact.go generated vendored
@ -13,6 +13,7 @@ import (
|
|||||||
"time"
|
"time"
|
||||||
|
|
||||||
"github.com/Microsoft/hcsshim/ext4/internal/format"
|
"github.com/Microsoft/hcsshim/ext4/internal/format"
|
||||||
|
"github.com/Microsoft/hcsshim/internal/memory"
|
||||||
)
|
)
|
||||||
|
|
||||||
// Writer writes a compact ext4 file system.
|
// Writer writes a compact ext4 file system.
|
||||||
@ -101,13 +102,13 @@ const (
|
|||||||
maxInodesPerGroup = BlockSize * 8 // Limited by the inode bitmap
|
maxInodesPerGroup = BlockSize * 8 // Limited by the inode bitmap
|
||||||
inodesPerGroupIncrement = BlockSize / inodeSize
|
inodesPerGroupIncrement = BlockSize / inodeSize
|
||||||
|
|
||||||
defaultMaxDiskSize = 16 * 1024 * 1024 * 1024 // 16GB
|
defaultMaxDiskSize = 16 * memory.GiB // 16GB
|
||||||
maxMaxDiskSize = 16 * 1024 * 1024 * 1024 * 1024 // 16TB
|
maxMaxDiskSize = 16 * 1024 * 1024 * 1024 * 1024 // 16TB
|
||||||
|
|
||||||
groupDescriptorSize = 32 // Use the small group descriptor
|
groupDescriptorSize = 32 // Use the small group descriptor
|
||||||
groupsPerDescriptorBlock = BlockSize / groupDescriptorSize
|
groupsPerDescriptorBlock = BlockSize / groupDescriptorSize
|
||||||
|
|
||||||
maxFileSize = 128 * 1024 * 1024 * 1024 // 128GB file size maximum for now
|
maxFileSize = 128 * memory.GiB // 128GB file size maximum for now
|
||||||
smallSymlinkSize = 59 // max symlink size that goes directly in the inode
|
 smallSymlinkSize = 59 // max symlink size that goes directly in the inode
 maxBlocksPerExtent = 0x8000 // maximum number of blocks in an extent
 inodeDataSize = 60
@@ -414,6 +415,15 @@ func (w *Writer) makeInode(f *File, node *inode) (*inode, error) {
 node.Devmajor = f.Devmajor
 node.Devminor = f.Devminor
 node.Data = nil
+if f.Xattrs == nil {
+f.Xattrs = make(map[string][]byte)
+}
+
+// copy over existing xattrs first, we need to merge existing xattrs and the passed xattrs.
+existingXattrs := make(map[string][]byte)
+if len(node.XattrInline) > 0 {
+getXattrs(node.XattrInline[4:], existingXattrs, 0)
+}
 node.XattrInline = nil

 var xstate xattrState
@@ -452,6 +462,13 @@ func (w *Writer) makeInode(f *File, node *inode) (*inode, error) {
 return nil, fmt.Errorf("invalid mode %o", mode)
 }

+// merge xattrs but prefer currently passed over existing
+for name, data := range existingXattrs {
+if _, ok := f.Xattrs[name]; !ok {
+f.Xattrs[name] = data
+}
+}
+
 // Accumulate the extended attributes.
 if len(f.Xattrs) != 0 {
 // Sort the xattrs to avoid non-determinism in map iteration.
@@ -514,15 +531,16 @@ func (w *Writer) lookup(name string, mustExist bool) (*inode, *inode, string, er
 return dir, child, childname, nil
 }

-// CreateWithParents adds a file to the file system creating the parent directories in the path if
-// they don't exist (like `mkdir -p`). These non existing parent directories are created
+// MakeParents ensures that all the parent directories in the path specified by `name` exists. If
+// they don't exist it creates them (like `mkdir -p`). These non existing parent directories are created
 // with the same permissions as that of it's parent directory. It is expected that the a
 // call to make these parent directories will be made at a later point with the correct
 // permissions, at that time the permissions of these directories will be updated.
-func (w *Writer) CreateWithParents(name string, f *File) error {
+func (w *Writer) MakeParents(name string) error {
 if err := w.finishInode(); err != nil {
 return err
 }

 // go through the directories in the path one by one and create the
 // parent directories if they don't exist.
 cleanname := path.Clean("/" + name)[1:]
@@ -553,7 +571,7 @@ func (w *Writer) CreateWithParents(name string, f *File) error {
 }
 root = root.Children[dirname]
 }
-return w.Create(name, f)
+return nil
 }

 // Create adds a file to the file system.
@@ -603,6 +621,8 @@ func (w *Writer) Create(name string, f *File) error {
 }

 // Link adds a hard link to the file system.
+// We support creating hardlinks to symlinks themselves instead of what
+// the symlinks link to, as this is what containerd does upstream.
 func (w *Writer) Link(oldname, newname string) error {
 if err := w.finishInode(); err != nil {
 return err
@@ -620,8 +640,8 @@ func (w *Writer) Link(oldname, newname string) error {
 return err
 }
 switch oldfile.Mode & format.TypeMask {
-case format.S_IFDIR, format.S_IFLNK:
-return fmt.Errorf("%s: link target cannot be a directory or symlink: %s", newname, oldname)
+case format.S_IFDIR:
+return fmt.Errorf("%s: link target cannot be a directory: %s", newname, oldname)
 }

 if existing != oldfile && oldfile.LinkCount >= format.MaxLinks {
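The compactext4 change above splits the old CreateWithParents into two calls: MakeParents only guarantees the directory chain exists, and Create is still invoked separately for the entry itself. A minimal sketch of that call sequence; compactext4 is an internal package, so this mirrors what the tar2ext4 converter below does rather than a public API, and the path and mode here are illustrative:

    // fs is a *compactext4.Writer created with compactext4.NewWriter(w),
    // where w is an io.ReadWriteSeeker backing the ext4 image.
    // Ensure "a" and "a/b" exist (mkdir -p semantics); their permissions are
    // inherited from the parent until the real directory entries are written.
    if err := fs.MakeParents("a/b/file.txt"); err != nil {
        return err
    }
    // Create the leaf entry itself with its real metadata.
    if err := fs.Create("a/b/file.txt", &compactext4.File{Mode: 0o644}); err != nil {
        return err
    }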
134 vendor/github.com/Microsoft/hcsshim/ext4/tar2ext4/tar2ext4.go generated vendored
@@ -3,15 +3,14 @@ package tar2ext4
 import (
 "archive/tar"
 "bufio"
-"bytes"
 "encoding/binary"
+"fmt"
 "github.com/pkg/errors"
 "io"
 "io/ioutil"
 "os"
 "path"
 "strings"
-"unsafe"

 "github.com/Microsoft/hcsshim/ext4/dmverity"
 "github.com/Microsoft/hcsshim/ext4/internal/compactext4"
@@ -65,16 +64,16 @@ func MaximumDiskSize(size int64) Option {
 const (
 whiteoutPrefix = ".wh."
 opaqueWhiteout = ".wh..wh..opq"
-ext4blocksize = compactext4.BlockSize
 )

-// Convert writes a compact ext4 file system image that contains the files in the
+// ConvertTarToExt4 writes a compact ext4 file system image that contains the files in the
 // input tar stream.
-func Convert(r io.Reader, w io.ReadWriteSeeker, options ...Option) error {
+func ConvertTarToExt4(r io.Reader, w io.ReadWriteSeeker, options ...Option) error {
 var p params
 for _, opt := range options {
 opt(&p)
 }

 t := tar.NewReader(bufio.NewReader(r))
 fs := compactext4.NewWriter(w, p.ext4opts...)
 for {
@@ -86,6 +85,10 @@ func Convert(r io.Reader, w io.ReadWriteSeeker, options ...Option) error {
 return err
 }

+if err = fs.MakeParents(hdr.Name); err != nil {
+return errors.Wrapf(err, "failed to ensure parent directories for %s", hdr.Name)
+}
+
 if p.convertWhiteout {
 dir, name := path.Split(hdr.Name)
 if strings.HasPrefix(name, whiteoutPrefix) {
@@ -93,12 +96,12 @@ func Convert(r io.Reader, w io.ReadWriteSeeker, options ...Option) error {
 // Update the directory with the appropriate xattr.
 f, err := fs.Stat(dir)
 if err != nil {
-return err
+return errors.Wrapf(err, "failed to stat parent directory of whiteout %s", hdr.Name)
 }
 f.Xattrs["trusted.overlay.opaque"] = []byte("y")
 err = fs.Create(dir, f)
 if err != nil {
-return err
+return errors.Wrapf(err, "failed to create opaque dir %s", hdr.Name)
 }
 } else {
 // Create an overlay-style whiteout.
@@ -109,7 +112,7 @@ func Convert(r io.Reader, w io.ReadWriteSeeker, options ...Option) error {
 }
 err = fs.Create(path.Join(dir, name[len(whiteoutPrefix):]), f)
 if err != nil {
-return err
+return errors.Wrapf(err, "failed to create whiteout file for %s", hdr.Name)
 }
 }

@@ -161,7 +164,7 @@ func Convert(r io.Reader, w io.ReadWriteSeeker, options ...Option) error {
 }
 f.Mode &= ^compactext4.TypeMask
 f.Mode |= typ
-err = fs.CreateWithParents(hdr.Name, f)
+err = fs.Create(hdr.Name, f)
 if err != nil {
 return err
 }
@@ -171,67 +174,29 @@ func Convert(r io.Reader, w io.ReadWriteSeeker, options ...Option) error {
 }
 }
 }
-err := fs.Close()
-if err != nil {
+return fs.Close()
+}
+
+// Convert wraps ConvertTarToExt4 and conditionally computes (and appends) the file image's cryptographic
+// hashes (merkle tree) or/and appends a VHD footer.
+func Convert(r io.Reader, w io.ReadWriteSeeker, options ...Option) error {
+var p params
+for _, opt := range options {
+opt(&p)
+}
+
+if err := ConvertTarToExt4(r, w, options...); err != nil {
 return err
 }

 if p.appendDMVerity {
-ext4size, err := w.Seek(0, io.SeekEnd)
-if err != nil {
-return err
-}
-
-// Rewind the stream and then read it all into a []byte for
-// dmverity processing
-_, err = w.Seek(0, io.SeekStart)
-if err != nil {
-return err
-}
-data, err := ioutil.ReadAll(w)
-if err != nil {
-return err
-}
-
-mtree, err := dmverity.MerkleTree(data)
-if err != nil {
-return errors.Wrap(err, "failed to build merkle tree")
-}
-
-// Write dmverity superblock and then the merkle tree after the end of the
-// ext4 filesystem
-_, err = w.Seek(0, io.SeekEnd)
-if err != nil {
-return err
-}
-superblock := dmverity.NewDMVeritySuperblock(uint64(ext4size))
-err = binary.Write(w, binary.LittleEndian, superblock)
-if err != nil {
-return err
-}
-// pad the superblock
-sbsize := int(unsafe.Sizeof(*superblock))
-padding := bytes.Repeat([]byte{0}, ext4blocksize-(sbsize%ext4blocksize))
-_, err = w.Write(padding)
-if err != nil {
-return err
-}
-// write the tree
-_, err = w.Write(mtree)
-if err != nil {
+if err := dmverity.ComputeAndWriteHashDevice(w, w); err != nil {
 return err
 }
 }

 if p.appendVhdFooter {
-size, err := w.Seek(0, io.SeekEnd)
-if err != nil {
-return err
-}
-err = binary.Write(w, binary.BigEndian, makeFixedVHDFooter(size))
-if err != nil {
-return err
-}
+return ConvertToVhd(w)
 }
 return nil
 }
@@ -266,5 +231,52 @@ func ReadExt4SuperBlock(vhdPath string) (*format.SuperBlock, error) {
 if err := binary.Read(vhd, binary.LittleEndian, &sb); err != nil {
 return nil, err
 }
+// Make sure the magic bytes are correct.
+if sb.Magic != format.SuperBlockMagic {
+return nil, errors.New("not an ext4 file system")
+}
 return &sb, nil
 }
+
+// ConvertAndComputeRootDigest writes a compact ext4 file system image that contains the files in the
+// input tar stream, computes the resulting file image's cryptographic hashes (merkle tree) and returns
+// merkle tree root digest. Convert is called with minimal options: ConvertWhiteout and MaximumDiskSize
+// set to dmverity.RecommendedVHDSizeGB.
+func ConvertAndComputeRootDigest(r io.Reader) (string, error) {
+out, err := ioutil.TempFile("", "")
+if err != nil {
+return "", fmt.Errorf("failed to create temporary file: %s", err)
+}
+defer func() {
+_ = os.Remove(out.Name())
+}()
+
+options := []Option{
+ConvertWhiteout,
+MaximumDiskSize(dmverity.RecommendedVHDSizeGB),
+}
+if err := ConvertTarToExt4(r, out, options...); err != nil {
+return "", fmt.Errorf("failed to convert tar to ext4: %s", err)
+}
+
+if _, err := out.Seek(0, io.SeekStart); err != nil {
+return "", fmt.Errorf("failed to seek start on temp file when creating merkle tree: %s", err)
+}
+
+tree, err := dmverity.MerkleTree(bufio.NewReaderSize(out, dmverity.MerkleTreeBufioSize))
+if err != nil {
+return "", fmt.Errorf("failed to create merkle tree: %s", err)
+}
+
+hash := dmverity.RootHash(tree)
+return fmt.Sprintf("%x", hash), nil
+}
+
+// ConvertToVhd converts given io.WriteSeeker to VHD, by appending the VHD footer with a fixed size.
+func ConvertToVhd(w io.WriteSeeker) error {
+size, err := w.Seek(0, io.SeekEnd)
+if err != nil {
+return err
+}
+return binary.Write(w, binary.BigEndian, makeFixedVHDFooter(size))
+}
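Taken together, the tar2ext4 changes split the old Convert into ConvertTarToExt4 (bare ext4 image) plus thin wrappers: Convert for the optional dm-verity hash device and VHD footer, ConvertAndComputeRootDigest for the hex merkle-tree root, and ConvertToVhd for the footer alone. A hedged sketch of driving the new surface from Go, using only functions and options visible in this diff; the file names are illustrative, not taken from containerd:

    package main

    import (
        "fmt"
        "log"
        "os"

        "github.com/Microsoft/hcsshim/ext4/tar2ext4"
    )

    func main() {
        in, err := os.Open("layer.tar") // hypothetical input layer tarball
        if err != nil {
            log.Fatal(err)
        }
        defer in.Close()

        out, err := os.Create("layer.ext4") // hypothetical output image
        if err != nil {
            log.Fatal(err)
        }
        defer out.Close()

        // Convert wraps ConvertTarToExt4 and, depending on the options given,
        // appends the dm-verity hash device and/or a fixed VHD footer.
        if err := tar2ext4.Convert(in, out, tar2ext4.ConvertWhiteout); err != nil {
            log.Fatal(err)
        }

        // Alternatively, compute only the dm-verity root digest of a tar stream.
        in2, err := os.Open("layer.tar")
        if err != nil {
            log.Fatal(err)
        }
        defer in2.Close()
        digest, err := tar2ext4.ConvertAndComputeRootDigest(in2)
        if err != nil {
            log.Fatal(err)
        }
        fmt.Println("root digest:", digest)
    }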
12 vendor/github.com/Microsoft/hcsshim/functional_tests.ps1 generated vendored
@@ -1,12 +0,0 @@
-# Requirements so far:
-# dockerd running
-# - image microsoft/nanoserver (matching host base image) docker load -i c:\baseimages\nanoserver.tar
-# - image alpine (linux) docker pull --platform=linux alpine
-
-
-# TODO: Add this a parameter for debugging. ie "functional-tests -debug=$true"
-#$env:HCSSHIM_FUNCTIONAL_TESTS_DEBUG="yes please"
-
-#pushd uvm
-go test -v -tags "functional uvmcreate uvmscratch uvmscsi uvmvpmem uvmvsmb uvmp9" ./...
-#popd

3 vendor/github.com/Microsoft/hcsshim/hcn/doc.go generated vendored Normal file
@@ -0,0 +1,3 @@
+// Package hcn is a shim for the Host Compute Networking (HCN) service, which manages networking for Windows Server
+// containers and Hyper-V containers. Previous to RS5, HCN was referred to as Host Networking Service (HNS).
+package hcn

8 vendor/github.com/Microsoft/hcsshim/hcn/hcn.go generated vendored
@@ -1,5 +1,5 @@
-// Package hcn is a shim for the Host Compute Networking (HCN) service, which manages networking for Windows Server
-// containers and Hyper-V containers. Previous to RS5, HCN was referred to as Host Networking Service (HNS).
+//go:build windows
+
 package hcn

 import (
@@ -228,7 +228,7 @@ func IPv6DualStackSupported() error {
 return platformDoesNotSupportError("IPv6 DualStack")
 }

-//L4proxySupported returns an error if the HCN verison does not support L4Proxy
+//L4proxySupported returns an error if the HCN version does not support L4Proxy
 func L4proxyPolicySupported() error {
 supported, err := GetCachedSupportedFeatures()
 if err != nil {
@@ -240,7 +240,7 @@ func L4proxyPolicySupported() error {
 return platformDoesNotSupportError("L4ProxyPolicy")
 }

-// L4WfpProxySupported returns an error if the HCN verison does not support L4WfpProxy
+// L4WfpProxySupported returns an error if the HCN version does not support L4WfpProxy
 func L4WfpProxyPolicySupported() error {
 supported, err := GetCachedSupportedFeatures()
 if err != nil {

4 vendor/github.com/Microsoft/hcsshim/hcn/hcnendpoint.go generated vendored
@@ -1,3 +1,5 @@
+//go:build windows
+
 package hcn

 import (
@@ -9,7 +11,7 @@ import (
 "github.com/sirupsen/logrus"
 )

-// IpConfig is assoicated with an endpoint
+// IpConfig is associated with an endpoint
 type IpConfig struct {
 IpAddress string `json:",omitempty"`
 PrefixLength uint8 `json:",omitempty"`

16 vendor/github.com/Microsoft/hcsshim/hcn/hcnerrors.go generated vendored
@@ -1,5 +1,5 @@
-// Package hcn is a shim for the Host Compute Networking (HCN) service, which manages networking for Windows Server
-// containers and Hyper-V containers. Previous to RS5, HCN was referred to as Host Networking Service (HNS).
+//go:build windows
+
 package hcn

 import (
@@ -87,10 +87,10 @@ func new(hr error, title string, rest string) error {

 //
 // Note that the below errors are not errors returned by hcn itself
-// we wish to seperate them as they are shim usage error
+// we wish to separate them as they are shim usage error
 //

-// NetworkNotFoundError results from a failed seach for a network by Id or Name
+// NetworkNotFoundError results from a failed search for a network by Id or Name
 type NetworkNotFoundError struct {
 NetworkName string
 NetworkID string
@@ -103,7 +103,7 @@ func (e NetworkNotFoundError) Error() string {
 return fmt.Sprintf("Network ID %q not found", e.NetworkID)
 }

-// EndpointNotFoundError results from a failed seach for an endpoint by Id or Name
+// EndpointNotFoundError results from a failed search for an endpoint by Id or Name
 type EndpointNotFoundError struct {
 EndpointName string
 EndpointID string
@@ -116,7 +116,7 @@ func (e EndpointNotFoundError) Error() string {
 return fmt.Sprintf("Endpoint ID %q not found", e.EndpointID)
 }

-// NamespaceNotFoundError results from a failed seach for a namsepace by Id
+// NamespaceNotFoundError results from a failed search for a namsepace by Id
 type NamespaceNotFoundError struct {
 NamespaceID string
 }
@@ -125,7 +125,7 @@ func (e NamespaceNotFoundError) Error() string {
 return fmt.Sprintf("Namespace ID %q not found", e.NamespaceID)
 }

-// LoadBalancerNotFoundError results from a failed seach for a loadbalancer by Id
+// LoadBalancerNotFoundError results from a failed search for a loadbalancer by Id
 type LoadBalancerNotFoundError struct {
 LoadBalancerId string
 }
@@ -134,7 +134,7 @@ func (e LoadBalancerNotFoundError) Error() string {
 return fmt.Sprintf("LoadBalancer %q not found", e.LoadBalancerId)
 }

-// RouteNotFoundError results from a failed seach for a route by Id
+// RouteNotFoundError results from a failed search for a route by Id
 type RouteNotFoundError struct {
 RouteId string
 }

2 vendor/github.com/Microsoft/hcsshim/hcn/hcnglobals.go generated vendored
@@ -1,3 +1,5 @@
+//go:build windows
+
 package hcn

 import (

2 vendor/github.com/Microsoft/hcsshim/hcn/hcnloadbalancer.go generated vendored
@@ -1,3 +1,5 @@
+//go:build windows
+
 package hcn

 import (

12 vendor/github.com/Microsoft/hcsshim/hcn/hcnnamespace.go generated vendored
@@ -1,3 +1,5 @@
+//go:build windows
+
 package hcn

 import (
@@ -27,7 +29,7 @@ type NamespaceResourceContainer struct {
 type NamespaceResourceType string

 var (
-// NamespaceResourceTypeContainer are contianers associated with a Namespace.
+// NamespaceResourceTypeContainer are containers associated with a Namespace.
 NamespaceResourceTypeContainer NamespaceResourceType = "Container"
 // NamespaceResourceTypeEndpoint are endpoints associated with a Namespace.
 NamespaceResourceTypeEndpoint NamespaceResourceType = "Endpoint"
@@ -294,11 +296,11 @@ func GetNamespaceContainerIds(namespaceId string) ([]string, error) {
 var containerIds []string
 for _, resource := range namespace.Resources {
 if resource.Type == "Container" {
-var contaienrResource NamespaceResourceContainer
-if err := json.Unmarshal([]byte(resource.Data), &contaienrResource); err != nil {
+var containerResource NamespaceResourceContainer
+if err := json.Unmarshal([]byte(resource.Data), &containerResource); err != nil {
 return nil, err
 }
-containerIds = append(containerIds, contaienrResource.Id)
+containerIds = append(containerIds, containerResource.Id)
 }
 }
 return containerIds, nil
@@ -375,7 +377,7 @@ func (namespace *HostComputeNamespace) Sync() error {
 }
 shimPath := runhcs.VMPipePath(cfg.HostUniqueID)
 if err := runhcs.IssueVMRequest(shimPath, &req); err != nil {
-// The shim is likey gone. Simply ignore the sync as if it didn't exist.
+// The shim is likely gone. Simply ignore the sync as if it didn't exist.
 if perr, ok := err.(*os.PathError); ok && perr.Err == syscall.ERROR_FILE_NOT_FOUND {
 // Remove the reg key there is no point to try again
 _ = cfg.Remove()

2 vendor/github.com/Microsoft/hcsshim/hcn/hcnnetwork.go generated vendored
@@ -1,3 +1,5 @@
+//go:build windows
+
 package hcn

 import (

4 vendor/github.com/Microsoft/hcsshim/hcn/hcnpolicy.go generated vendored
@@ -1,3 +1,5 @@
+//go:build windows
+
 package hcn

 import (
@@ -292,7 +294,7 @@ const (
 type SetPolicySetting struct {
 Id string
 Name string
-Type SetPolicyType
+Type SetPolicyType `json:"PolicyType"`
 Values string
 }

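The only functional change in hcnpolicy.go is the `json:"PolicyType"` tag: the Go field is still named Type, but it now serializes under the key the HCN wire format expects. A rough sketch of the effect (the hcn package builds only on Windows; the ID and values are made up, and this assumes SetPolicyType is a string-backed type so an untyped string literal converts):

    setting := hcn.SetPolicySetting{
        Id:     "0b4656b8-4ab7-43a9-8dbc-55a4a9e1e57a", // illustrative GUID
        Name:   "example-set",
        Type:   "IPSET",             // illustrative policy type value
        Values: "10.0.0.1,10.0.0.2", // illustrative member addresses
    }
    b, _ := json.Marshal(setting)
    // Before this change the third key was "Type"; with the tag it is emitted as "PolicyType":
    // {"Id":"0b4656b8-4ab7-43a9-8dbc-55a4a9e1e57a","Name":"example-set","PolicyType":"IPSET","Values":"10.0.0.1,10.0.0.2"}
    fmt.Println(string(b))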
2 vendor/github.com/Microsoft/hcsshim/hcn/hcnroute.go generated vendored
@@ -1,3 +1,5 @@
+//go:build windows
+
 package hcn

 import (

11 vendor/github.com/Microsoft/hcsshim/hcn/hcnsupport.go generated vendored
@@ -1,11 +1,14 @@
+//go:build windows
+
 package hcn

 import (
-"fmt"
 "sync"

 "github.com/pkg/errors"
 "github.com/sirupsen/logrus"
+
+"github.com/Microsoft/hcsshim/internal/log"
 )

 var (
@@ -112,9 +115,9 @@ func getSupportedFeatures() (SupportedFeatures, error) {
 features.NetworkACL = isFeatureSupported(globals.Version, NetworkACLPolicyVersion)
 features.NestedIpSet = isFeatureSupported(globals.Version, NestedIpSetVersion)

-logrus.WithFields(logrus.Fields{
-"version": fmt.Sprintf("%+v", globals.Version),
-"supportedFeatures": fmt.Sprintf("%+v", features),
+log.L.WithFields(logrus.Fields{
+"version": globals.Version,
+"supportedFeatures": features,
 }).Info("HCN feature check")

 return features, nil

2 vendor/github.com/Microsoft/hcsshim/hcsshim.go generated vendored
@@ -1,3 +1,5 @@
+//go:build windows
+
 // Shim for the Host Compute Service (HCS) to manage Windows Server
 // containers and Hyper-V containers.

2 vendor/github.com/Microsoft/hcsshim/hnsendpoint.go generated vendored
@@ -1,3 +1,5 @@
+//go:build windows
+
 package hcsshim

 import (

2 vendor/github.com/Microsoft/hcsshim/hnsglobals.go generated vendored
@@ -1,3 +1,5 @@
+//go:build windows
+
 package hcsshim

 import (

6 vendor/github.com/Microsoft/hcsshim/hnsnetwork.go generated vendored
@@ -1,14 +1,16 @@
+//go:build windows
+
 package hcsshim

 import (
 "github.com/Microsoft/hcsshim/internal/hns"
 )

-// Subnet is assoicated with a network and represents a list
+// Subnet is associated with a network and represents a list
 // of subnets available to the network
 type Subnet = hns.Subnet

-// MacPool is assoicated with a network and represents a list
+// MacPool is associated with a network and represents a list
 // of macaddresses available to the network
 type MacPool = hns.MacPool

2 vendor/github.com/Microsoft/hcsshim/hnspolicylist.go generated vendored
@@ -1,3 +1,5 @@
+//go:build windows
+
 package hcsshim

 import (

2 vendor/github.com/Microsoft/hcsshim/hnssupport.go generated vendored
@@ -1,3 +1,5 @@
+//go:build windows
+
 package hcsshim

 import (

2 vendor/github.com/Microsoft/hcsshim/interface.go generated vendored
@@ -1,3 +1,5 @@
+//go:build windows
+
 package hcsshim

 import (

1 vendor/github.com/Microsoft/hcsshim/internal/cni/doc.go generated vendored Normal file
@@ -0,0 +1 @@
+package cni

4 vendor/github.com/Microsoft/hcsshim/internal/cni/registry.go generated vendored
@@ -1,3 +1,5 @@
+//go:build windows
+
 package cni

 import (
@@ -84,7 +86,7 @@ func (pnc *PersistedNamespaceConfig) Store() error {
 }

 // Remove removes any persisted state associated with this config. If the config
-// is not found in the registery `Remove` returns no error.
+// is not found in the registry `Remove` returns no error.
 func (pnc *PersistedNamespaceConfig) Remove() error {
 if pnc.stored {
 sk, err := regstate.Open(cniRoot, false)

2 vendor/github.com/Microsoft/hcsshim/internal/cow/cow.go generated vendored
@@ -1,3 +1,5 @@
+//go:build windows
+
 package cow

 import (

2 vendor/github.com/Microsoft/hcsshim/internal/hcs/callback.go generated vendored
@@ -1,3 +1,5 @@
+//go:build windows
+
 package hcs

 import (

1 vendor/github.com/Microsoft/hcsshim/internal/hcs/doc.go generated vendored Normal file
@@ -0,0 +1 @@
+package hcs

35 vendor/github.com/Microsoft/hcsshim/internal/hcs/errors.go generated vendored
@@ -1,3 +1,5 @@
+//go:build windows
+
 package hcs

 import (
@@ -51,6 +53,9 @@ var (
 // ErrUnexpectedValue is an error encountered when hcs returns an invalid value
 ErrUnexpectedValue = errors.New("unexpected value returned from hcs")

+// ErrOperationDenied is an error when hcs attempts an operation that is explicitly denied
+ErrOperationDenied = errors.New("operation denied")
+
 // ErrVmcomputeAlreadyStopped is an error encountered when a shutdown or terminate request is made on a stopped container
 ErrVmcomputeAlreadyStopped = syscall.Errno(0xc0370110)

@@ -252,8 +257,8 @@ func makeProcessError(process *Process, op string, err error, events []ErrorEven
 // will currently return true when the error is ErrElementNotFound.
 func IsNotExist(err error) bool {
 err = getInnerError(err)
-return err == ErrComputeSystemDoesNotExist ||
-err == ErrElementNotFound
+return errors.Is(err, ErrComputeSystemDoesNotExist) ||
+errors.Is(err, ErrElementNotFound)
 }

 // IsErrorInvalidHandle checks whether the error is the result of an operation carried
@@ -261,21 +266,21 @@ func IsNotExist(err error) bool {
 // stats on a container in the process of being stopped.
 func IsErrorInvalidHandle(err error) bool {
 err = getInnerError(err)
-return err == ErrInvalidHandle
+return errors.Is(err, ErrInvalidHandle)
 }

 // IsAlreadyClosed checks if an error is caused by the Container or Process having been
 // already closed by a call to the Close() method.
 func IsAlreadyClosed(err error) bool {
 err = getInnerError(err)
-return err == ErrAlreadyClosed
+return errors.Is(err, ErrAlreadyClosed)
 }

 // IsPending returns a boolean indicating whether the error is that
 // the requested operation is being completed in the background.
 func IsPending(err error) bool {
 err = getInnerError(err)
-return err == ErrVmcomputeOperationPending
+return errors.Is(err, ErrVmcomputeOperationPending)
 }

 // IsTimeout returns a boolean indicating whether the error is caused by
@@ -285,7 +290,7 @@ func IsTimeout(err error) bool {
 return true
 }
 err = getInnerError(err)
-return err == ErrTimeout
+return errors.Is(err, ErrTimeout)
 }

 // IsAlreadyStopped returns a boolean indicating whether the error is caused by
@@ -295,9 +300,9 @@ func IsTimeout(err error) bool {
 // will currently return true when the error is ErrElementNotFound.
 func IsAlreadyStopped(err error) bool {
 err = getInnerError(err)
-return err == ErrVmcomputeAlreadyStopped ||
-err == ErrProcessAlreadyStopped ||
-err == ErrElementNotFound
+return errors.Is(err, ErrVmcomputeAlreadyStopped) ||
+errors.Is(err, ErrProcessAlreadyStopped) ||
+errors.Is(err, ErrElementNotFound)
 }

 // IsNotSupported returns a boolean indicating whether the error is caused by
@@ -308,24 +313,24 @@ func IsAlreadyStopped(err error) bool {
 func IsNotSupported(err error) bool {
 err = getInnerError(err)
 // If Platform doesn't recognize or support the request sent, below errors are seen
-return err == ErrVmcomputeInvalidJSON ||
-err == ErrInvalidData ||
-err == ErrNotSupported ||
-err == ErrVmcomputeUnknownMessage
+return errors.Is(err, ErrVmcomputeInvalidJSON) ||
+errors.Is(err, ErrInvalidData) ||
+errors.Is(err, ErrNotSupported) ||
+errors.Is(err, ErrVmcomputeUnknownMessage)
 }

 // IsOperationInvalidState returns true when err is caused by
 // `ErrVmcomputeOperationInvalidState`.
 func IsOperationInvalidState(err error) bool {
 err = getInnerError(err)
-return err == ErrVmcomputeOperationInvalidState
+return errors.Is(err, ErrVmcomputeOperationInvalidState)
 }

 // IsAccessIsDenied returns true when err is caused by
 // `ErrVmcomputeOperationAccessIsDenied`.
 func IsAccessIsDenied(err error) bool {
 err = getInnerError(err)
-return err == ErrVmcomputeOperationAccessIsDenied
+return errors.Is(err, ErrVmcomputeOperationAccessIsDenied)
 }

 func getInnerError(err error) error {
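The pattern running through errors.go is the switch from `==` comparison to errors.Is, so classification helpers such as IsNotExist keep working when callers wrap the HCS sentinel errors. A standalone illustration of why that matters (plain Go, not hcsshim-specific; the sentinel here is a stand-in):

    package main

    import (
        "errors"
        "fmt"
    )

    // stand-in for an hcs sentinel error such as ErrElementNotFound
    var errNotFound = errors.New("element not found")

    func main() {
        wrapped := fmt.Errorf("query compute system: %w", errNotFound)

        fmt.Println(wrapped == errNotFound)          // false: the wrapper is a new value
        fmt.Println(errors.Is(wrapped, errNotFound)) // true: errors.Is walks the wrap chain
    }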
19 vendor/github.com/Microsoft/hcsshim/internal/hcs/process.go generated vendored
@@ -1,3 +1,5 @@
+//go:build windows
+
 package hcs

 import (
@@ -114,9 +116,9 @@ func (process *Process) processSignalResult(ctx context.Context, err error) (boo

 // Signal signals the process with `options`.
 //
-// For LCOW `guestrequest.SignalProcessOptionsLCOW`.
+// For LCOW `guestresource.SignalProcessOptionsLCOW`.
 //
-// For WCOW `guestrequest.SignalProcessOptionsWCOW`.
+// For WCOW `guestresource.SignalProcessOptionsWCOW`.
 func (process *Process) Signal(ctx context.Context, options interface{}) (bool, error) {
 process.handleLock.RLock()
 defer process.handleLock.RUnlock()
@@ -201,7 +203,7 @@ func (process *Process) Kill(ctx context.Context) (bool, error) {
 // call multiple times.
 func (process *Process) waitBackground() {
 operation := "hcs::Process::waitBackground"
-ctx, span := trace.StartSpan(context.Background(), operation)
+ctx, span := oc.StartSpan(context.Background(), operation)
 defer span.End()
 span.AddAttributes(
 trace.StringAttribute("cid", process.SystemID()),
@@ -254,7 +256,7 @@ func (process *Process) waitBackground() {
 }

 // Wait waits for the process to exit. If the process has already exited returns
-// the pervious error (if any).
+// the previous error (if any).
 func (process *Process) Wait() error {
 <-process.waitBlock
 return process.waitError
@@ -312,7 +314,7 @@ func (process *Process) ExitCode() (int, error) {
 // are the responsibility of the caller to close.
 func (process *Process) StdioLegacy() (_ io.WriteCloser, _ io.ReadCloser, _ io.ReadCloser, err error) {
 operation := "hcs::Process::StdioLegacy"
-ctx, span := trace.StartSpan(context.Background(), operation)
+ctx, span := oc.StartSpan(context.Background(), operation)
 defer span.End()
 defer func() { oc.SetSpanStatus(span, err) }()
 span.AddAttributes(
@@ -398,7 +400,7 @@ func (process *Process) CloseStdin(ctx context.Context) error {
 }

 func (process *Process) CloseStdout(ctx context.Context) (err error) {
-ctx, span := trace.StartSpan(ctx, "hcs::Process::CloseStdout") //nolint:ineffassign,staticcheck
+ctx, span := oc.StartSpan(ctx, "hcs::Process::CloseStdout") //nolint:ineffassign,staticcheck
 defer span.End()
 defer func() { oc.SetSpanStatus(span, err) }()
 span.AddAttributes(
@@ -422,7 +424,7 @@ func (process *Process) CloseStdout(ctx context.Context) (err error) {
 }

 func (process *Process) CloseStderr(ctx context.Context) (err error) {
-ctx, span := trace.StartSpan(ctx, "hcs::Process::CloseStderr") //nolint:ineffassign,staticcheck
+ctx, span := oc.StartSpan(ctx, "hcs::Process::CloseStderr") //nolint:ineffassign,staticcheck
 defer span.End()
 defer func() { oc.SetSpanStatus(span, err) }()
 span.AddAttributes(
@@ -441,7 +443,6 @@ func (process *Process) CloseStderr(ctx context.Context) (err error) {
 if process.stderr != nil {
 process.stderr.Close()
 process.stderr = nil
-
 }
 return nil
 }
@@ -450,7 +451,7 @@ func (process *Process) CloseStderr(ctx context.Context) (err error) {
 // or wait on it.
 func (process *Process) Close() (err error) {
 operation := "hcs::Process::Close"
-ctx, span := trace.StartSpan(context.Background(), operation)
+ctx, span := oc.StartSpan(context.Background(), operation)
 defer span.End()
 defer func() { oc.SetSpanStatus(span, err) }()
 span.AddAttributes(

4 vendor/github.com/Microsoft/hcsshim/internal/hcs/schema1/schema1.go generated vendored
@@ -1,3 +1,5 @@
+//go:build windows
+
 package schema1

 import (
@@ -101,7 +103,7 @@ type ContainerConfig struct {
 HvRuntime *HvRuntime `json:",omitempty"` // Hyper-V container settings. Used by Hyper-V containers only. Format ImagePath=%root%\BaseLayerID\UtilityVM
 Servicing bool `json:",omitempty"` // True if this container is for servicing
 AllowUnqualifiedDNSQuery bool `json:",omitempty"` // True to allow unqualified DNS name resolution
-DNSSearchList string `json:",omitempty"` // Comma seperated list of DNS suffixes to use for name resolution
+DNSSearchList string `json:",omitempty"` // Comma separated list of DNS suffixes to use for name resolution
 ContainerType string `json:",omitempty"` // "Linux" for Linux containers on Windows. Omitted otherwise.
 TerminateOnLastHandleClosed bool `json:",omitempty"` // Should HCS terminate the container once all handles have been closed
 MappedVirtualDisks []MappedVirtualDisk `json:",omitempty"` // Array of virtual disks to mount at start

8 vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/cpu_group_property.go generated vendored
@@ -9,6 +9,14 @@

 package hcsschema

+type CPUGroupPropertyCode uint32
+
+const (
+CPUCapacityProperty = 0x00010000
+CPUSchedulingPriorityProperty = 0x00020000
+IdleLPReserveProperty = 0x00030000
+)
+
 type CpuGroupProperty struct {
 PropertyCode uint32 `json:"PropertyCode,omitempty"`
 PropertyValue uint32 `json:"PropertyValue,omitempty"`
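The new CPUGroupPropertyCode constants give names to the raw property codes used when modifying a CPU group. A fragment showing how a request body might be built with them; hcsschema is an internal package, so this is the shape of code inside the shim rather than importable API, and the priority value is illustrative:

    // PropertyCode is still declared as uint32, so the named constant is used directly.
    prop := hcsschema.CpuGroupProperty{
        PropertyCode:  hcsschema.CPUSchedulingPriorityProperty, // 0x00020000
        PropertyValue: 2,                                       // illustrative priority
    }
    b, _ := json.Marshal(prop)
    fmt.Println(string(b)) // {"PropertyCode":131072,"PropertyValue":2}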
3 vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/guest_state.go generated vendored
@@ -14,6 +14,9 @@ type GuestState struct {
 // The path to an existing file uses for persistent guest state storage. An empty string indicates the system should initialize new transient, in-memory guest state.
 GuestStateFilePath string `json:"GuestStateFilePath,omitempty"`

+// The guest state file type affected by different guest isolation modes - whether a file or block storage.
+GuestStateFileType string `json:"GuestStateFileType,omitempty"`
+
 // The path to an existing file for persistent runtime state storage. An empty string indicates the system should initialize new transient, in-memory runtime state.
 RuntimeStateFilePath string `json:"RuntimeStateFilePath,omitempty"`

21 vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/isolation_settings.go generated vendored Normal file
@@ -0,0 +1,21 @@
+/*
+ * HCS API
+ *
+ * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
+ *
+ * API version: 2.4
+ * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git)
+ */
+
+package hcsschema
+
+type IsolationSettings struct {
+// Guest isolation type options to decide virtual trust levels of virtual machine
+IsolationType string `json:"IsolationType,omitempty"`
+// Configuration to debug HCL layer for HCS VM TODO: Task 31102306: Miss the way to prevent the exposure of private debug configuration in HCS TODO: Think about the secret configurations which are private in VMMS VM (only edit by hvsedit)
+DebugHost string `json:"DebugHost,omitempty"`
+DebugPort int64 `json:"DebugPort,omitempty"`
+// Optional data passed by host on isolated virtual machine start
+LaunchData string `json:"LaunchData,omitempty"`
+HclEnabled bool `json:"HclEnabled,omitempty"`
+}

4 vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/modify_setting_request.go generated vendored
@@ -9,10 +9,12 @@

 package hcsschema

+import "github.com/Microsoft/hcsshim/internal/protocol/guestrequest"
+
 type ModifySettingRequest struct {
 ResourcePath string `json:"ResourcePath,omitempty"`

-RequestType string `json:"RequestType,omitempty"`
+RequestType guestrequest.RequestType `json:"RequestType,omitempty"` // NOTE: Swagger generated as string. Locally updated.

 Settings interface{} `json:"Settings,omitempty"` // NOTE: Swagger generated as *interface{}. Locally updated

16 vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/security_settings.go generated vendored Normal file
@@ -0,0 +1,16 @@
+/*
+ * HCS API
+ *
+ * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
+ *
+ * API version: 2.4
+ * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git)
+ */
+
+package hcsschema
+
+type SecuritySettings struct {
+// Enablement of Trusted Platform Module on the computer system
+EnableTpm bool `json:"EnableTpm,omitempty"`
+Isolation *IsolationSettings `json:"Isolation,omitempty"`
+}

28 vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/system_time.go generated vendored Normal file
@@ -0,0 +1,28 @@
+/*
+ * HCS API
+ *
+ * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
+ *
+ * API version: 2.1
+ * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git)
+ */
+
+package hcsschema
+
+type SystemTime struct {
+Year int32 `json:"Year,omitempty"`
+
+Month int32 `json:"Month,omitempty"`
+
+DayOfWeek int32 `json:"DayOfWeek,omitempty"`
+
+Day int32 `json:"Day,omitempty"`
+
+Hour int32 `json:"Hour,omitempty"`
+
+Minute int32 `json:"Minute,omitempty"`
+
+Second int32 `json:"Second,omitempty"`
+
+Milliseconds int32 `json:"Milliseconds,omitempty"`
+}

26 vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/time_zone_information.go generated vendored Normal file
@@ -0,0 +1,26 @@
+/*
+ * HCS API
+ *
+ * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
+ *
+ * API version: 2.1
+ * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git)
+ */
+
+package hcsschema
+
+type TimeZoneInformation struct {
+Bias int32 `json:"Bias,omitempty"`
+
+StandardName string `json:"StandardName,omitempty"`
+
+StandardDate *SystemTime `json:"StandardDate,omitempty"`
+
+StandardBias int32 `json:"StandardBias,omitempty"`
+
+DaylightName string `json:"DaylightName,omitempty"`
+
+DaylightDate *SystemTime `json:"DaylightDate,omitempty"`
+
+DaylightBias int32 `json:"DaylightBias,omitempty"`
+}

2 vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/uefi.go generated vendored
@@ -12,6 +12,8 @@ package hcsschema
 type Uefi struct {
 EnableDebugger bool `json:"EnableDebugger,omitempty"`

+ApplySecureBootTemplate string `json:"ApplySecureBootTemplate,omitempty"`
+
 SecureBootTemplateId string `json:"SecureBootTemplateId,omitempty"`

 BootThis *UefiBootEntry `json:"BootThis,omitempty"`

2 vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/virtual_machine.go generated vendored
@@ -29,4 +29,6 @@ type VirtualMachine struct {
 StorageQoS *StorageQoS `json:"StorageQoS,omitempty"`

 GuestConnection *GuestConnection `json:"GuestConnection,omitempty"`
+
+SecuritySettings *SecuritySettings `json:"SecuritySettings,omitempty"`
 }

2 vendor/github.com/Microsoft/hcsshim/internal/hcs/service.go generated vendored
@@ -1,3 +1,5 @@
+//go:build windows
+
 package hcs

 import (

27 vendor/github.com/Microsoft/hcsshim/internal/hcs/system.go generated vendored
@@ -1,3 +1,5 @@
+//go:build windows
+
 package hcs

 import (
@@ -55,7 +57,7 @@ func CreateComputeSystem(ctx context.Context, id string, hcsDocumentInterface in

 // hcsCreateComputeSystemContext is an async operation. Start the outer span
 // here to measure the full create time.
-ctx, span := trace.StartSpan(ctx, operation)
+ctx, span := oc.StartSpan(ctx, operation)
 defer span.End()
 defer func() { oc.SetSpanStatus(span, err) }()
 span.AddAttributes(trace.StringAttribute("cid", id))
@@ -190,7 +192,7 @@ func (computeSystem *System) Start(ctx context.Context) (err error) {

 // hcsStartComputeSystemContext is an async operation. Start the outer span
 // here to measure the full start time.
-ctx, span := trace.StartSpan(ctx, operation)
+ctx, span := oc.StartSpan(ctx, operation)
 defer span.End()
 defer func() { oc.SetSpanStatus(span, err) }()
 span.AddAttributes(trace.StringAttribute("cid", computeSystem.id))
@@ -265,7 +267,7 @@ func (computeSystem *System) Terminate(ctx context.Context) error {
 // safe to call multiple times.
 func (computeSystem *System) waitBackground() {
 operation := "hcs::System::waitBackground"
-ctx, span := trace.StartSpan(context.Background(), operation)
+ctx, span := oc.StartSpan(context.Background(), operation)
 defer span.End()
 span.AddAttributes(trace.StringAttribute("cid", computeSystem.id))

@@ -495,7 +497,7 @@ func (computeSystem *System) PropertiesV2(ctx context.Context, types ...hcsschem
 if err == nil && len(fallbackTypes) == 0 {
 return properties, nil
 } else if err != nil {
-logEntry.WithError(fmt.Errorf("failed to query compute system properties in-proc: %w", err))
+logEntry = logEntry.WithError(fmt.Errorf("failed to query compute system properties in-proc: %w", err))
 fallbackTypes = types
 }

@@ -529,7 +531,7 @@ func (computeSystem *System) Pause(ctx context.Context) (err error) {

 // hcsPauseComputeSystemContext is an async peration. Start the outer span
 // here to measure the full pause time.
-ctx, span := trace.StartSpan(ctx, operation)
+ctx, span := oc.StartSpan(ctx, operation)
 defer span.End()
 defer func() { oc.SetSpanStatus(span, err) }()
 span.AddAttributes(trace.StringAttribute("cid", computeSystem.id))
@@ -556,7 +558,7 @@ func (computeSystem *System) Resume(ctx context.Context) (err error) {

 // hcsResumeComputeSystemContext is an async operation. Start the outer span
 // here to measure the full restore time.
-ctx, span := trace.StartSpan(ctx, operation)
+ctx, span := oc.StartSpan(ctx, operation)
 defer span.End()
 defer func() { oc.SetSpanStatus(span, err) }()
 span.AddAttributes(trace.StringAttribute("cid", computeSystem.id))
@@ -581,9 +583,9 @@ func (computeSystem *System) Resume(ctx context.Context) (err error) {
 func (computeSystem *System) Save(ctx context.Context, options interface{}) (err error) {
 operation := "hcs::System::Save"

-// hcsSaveComputeSystemContext is an async peration. Start the outer span
+// hcsSaveComputeSystemContext is an async operation. Start the outer span
 // here to measure the full save time.
-ctx, span := trace.StartSpan(ctx, operation)
+ctx, span := oc.StartSpan(ctx, operation)
 defer span.End()
 defer func() { oc.SetSpanStatus(span, err) }()
 span.AddAttributes(trace.StringAttribute("cid", computeSystem.id))
@@ -626,6 +628,11 @@ func (computeSystem *System) createProcess(ctx context.Context, operation string
 processInfo, processHandle, resultJSON, err := vmcompute.HcsCreateProcess(ctx, computeSystem.handle, configuration)
 events := processHcsResult(ctx, resultJSON)
 if err != nil {
+if v2, ok := c.(*hcsschema.ProcessParameters); ok {
+operation += ": " + v2.CommandLine
+} else if v1, ok := c.(*schema1.ProcessConfig); ok {
+operation += ": " + v1.CommandLine
+}
 return nil, nil, makeSystemError(computeSystem, operation, err, events)
 }

@@ -692,7 +699,7 @@ func (computeSystem *System) OpenProcess(ctx context.Context, pid int) (*Process
 // Close cleans up any state associated with the compute system but does not terminate or wait for it.
 func (computeSystem *System) Close() (err error) {
 operation := "hcs::System::Close"
-ctx, span := trace.StartSpan(context.Background(), operation)
+ctx, span := oc.StartSpan(context.Background(), operation)
|
ctx, span := oc.StartSpan(context.Background(), operation)
|
||||||
defer span.End()
|
defer span.End()
|
||||||
defer func() { oc.SetSpanStatus(span, err) }()
|
defer func() { oc.SetSpanStatus(span, err) }()
|
||||||
span.AddAttributes(trace.StringAttribute("cid", computeSystem.id))
|
span.AddAttributes(trace.StringAttribute("cid", computeSystem.id))
|
||||||
@ -762,7 +769,7 @@ func (computeSystem *System) unregisterCallback(ctx context.Context) error {
|
|||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// hcsUnregisterComputeSystemCallback has its own syncronization
|
// hcsUnregisterComputeSystemCallback has its own synchronization
|
||||||
// to wait for all callbacks to complete. We must NOT hold the callbackMapLock.
|
// to wait for all callbacks to complete. We must NOT hold the callbackMapLock.
|
||||||
err := vmcompute.HcsUnregisterComputeSystemCallback(ctx, handle)
|
err := vmcompute.HcsUnregisterComputeSystemCallback(ctx, handle)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
|
2
vendor/github.com/Microsoft/hcsshim/internal/hcs/utils.go
generated
vendored
2
vendor/github.com/Microsoft/hcsshim/internal/hcs/utils.go
generated
vendored
@ -1,3 +1,5 @@
|
|||||||
|
//go:build windows
|
||||||
|
|
||||||
package hcs
|
package hcs
|
||||||
|
|
||||||
import (
|
import (
|
||||||
|
2
vendor/github.com/Microsoft/hcsshim/internal/hcs/waithelper.go
generated
vendored
2
vendor/github.com/Microsoft/hcsshim/internal/hcs/waithelper.go
generated
vendored
@ -1,3 +1,5 @@
|
|||||||
|
//go:build windows
|
||||||
|
|
||||||
package hcs
|
package hcs
|
||||||
|
|
||||||
import (
|
import (
|
||||||
|
1
vendor/github.com/Microsoft/hcsshim/internal/hcserror/doc.go
generated
vendored
Normal file
1
vendor/github.com/Microsoft/hcsshim/internal/hcserror/doc.go
generated
vendored
Normal file
@ -0,0 +1 @@
|
|||||||
|
package hcserror
|
2
vendor/github.com/Microsoft/hcsshim/internal/hcserror/hcserror.go
generated
vendored
2
vendor/github.com/Microsoft/hcsshim/internal/hcserror/hcserror.go
generated
vendored
@ -1,3 +1,5 @@
|
|||||||
|
//go:build windows
|
||||||
|
|
||||||
package hcserror
|
package hcserror
|
||||||
|
|
||||||
import (
|
import (
|
||||||
|
1
vendor/github.com/Microsoft/hcsshim/internal/hns/doc.go
generated
vendored
Normal file
1
vendor/github.com/Microsoft/hcsshim/internal/hns/doc.go
generated
vendored
Normal file
@ -0,0 +1 @@
|
|||||||
|
package hns
|
4
vendor/github.com/Microsoft/hcsshim/internal/hns/hnsendpoint.go
generated
vendored
4
vendor/github.com/Microsoft/hcsshim/internal/hns/hnsendpoint.go
generated
vendored
@ -1,3 +1,5 @@
|
|||||||
|
//go:build windows
|
||||||
|
|
||||||
package hns
|
package hns
|
||||||
|
|
||||||
import (
|
import (
|
||||||
@ -146,7 +148,6 @@ func (endpoint *HNSEndpoint) IsAttached(vID string) (bool, error) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
return false, nil
|
return false, nil
|
||||||
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// Create Endpoint by sending EndpointRequest to HNS. TODO: Create a separate HNS interface to place all these methods
|
// Create Endpoint by sending EndpointRequest to HNS. TODO: Create a separate HNS interface to place all these methods
|
||||||
@ -281,7 +282,6 @@ func (endpoint *HNSEndpoint) HostAttach(compartmentID uint16) error {
|
|||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
return hnsCall("POST", "/endpoints/"+endpoint.Id+"/attach", string(jsonString), &response)
|
return hnsCall("POST", "/endpoints/"+endpoint.Id+"/attach", string(jsonString), &response)
|
||||||
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// HostDetach detaches a nic on the host
|
// HostDetach detaches a nic on the host
|
||||||
|
2
vendor/github.com/Microsoft/hcsshim/internal/hns/hnsfuncs.go
generated
vendored
2
vendor/github.com/Microsoft/hcsshim/internal/hns/hnsfuncs.go
generated
vendored
@ -1,3 +1,5 @@
|
|||||||
|
//go:build windows
|
||||||
|
|
||||||
package hns
|
package hns
|
||||||
|
|
||||||
import (
|
import (
|
||||||
|
2
vendor/github.com/Microsoft/hcsshim/internal/hns/hnsglobals.go
generated
vendored
2
vendor/github.com/Microsoft/hcsshim/internal/hns/hnsglobals.go
generated
vendored
@ -1,3 +1,5 @@
|
|||||||
|
//go:build windows
|
||||||
|
|
||||||
package hns
|
package hns
|
||||||
|
|
||||||
type HNSGlobals struct {
|
type HNSGlobals struct {
|
||||||
|
9
vendor/github.com/Microsoft/hcsshim/internal/hns/hnsnetwork.go
generated
vendored
9
vendor/github.com/Microsoft/hcsshim/internal/hns/hnsnetwork.go
generated
vendored
@ -1,13 +1,16 @@
|
|||||||
|
//go:build windows
|
||||||
|
|
||||||
package hns
|
package hns
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"encoding/json"
|
"encoding/json"
|
||||||
"errors"
|
"errors"
|
||||||
"github.com/sirupsen/logrus"
|
|
||||||
"net"
|
"net"
|
||||||
|
|
||||||
|
"github.com/sirupsen/logrus"
|
||||||
)
|
)
|
||||||
|
|
||||||
// Subnet is assoicated with a network and represents a list
|
// Subnet is associated with a network and represents a list
|
||||||
// of subnets available to the network
|
// of subnets available to the network
|
||||||
type Subnet struct {
|
type Subnet struct {
|
||||||
AddressPrefix string `json:",omitempty"`
|
AddressPrefix string `json:",omitempty"`
|
||||||
@ -15,7 +18,7 @@ type Subnet struct {
|
|||||||
Policies []json.RawMessage `json:",omitempty"`
|
Policies []json.RawMessage `json:",omitempty"`
|
||||||
}
|
}
|
||||||
|
|
||||||
// MacPool is assoicated with a network and represents a list
|
// MacPool is associated with a network and represents a list
|
||||||
// of macaddresses available to the network
|
// of macaddresses available to the network
|
||||||
type MacPool struct {
|
type MacPool struct {
|
||||||
StartMacAddress string `json:",omitempty"`
|
StartMacAddress string `json:",omitempty"`
|
||||||
|
2
vendor/github.com/Microsoft/hcsshim/internal/hns/hnspolicylist.go
generated
vendored
2
vendor/github.com/Microsoft/hcsshim/internal/hns/hnspolicylist.go
generated
vendored
@ -1,3 +1,5 @@
|
|||||||
|
//go:build windows
|
||||||
|
|
||||||
package hns
|
package hns
|
||||||
|
|
||||||
import (
|
import (
|
||||||
|
2
vendor/github.com/Microsoft/hcsshim/internal/hns/hnssupport.go
generated
vendored
2
vendor/github.com/Microsoft/hcsshim/internal/hns/hnssupport.go
generated
vendored
@ -1,3 +1,5 @@
|
|||||||
|
//go:build windows
|
||||||
|
|
||||||
package hns
|
package hns
|
||||||
|
|
||||||
import (
|
import (
|
||||||
|
2
vendor/github.com/Microsoft/hcsshim/internal/hns/namespace.go
generated
vendored
2
vendor/github.com/Microsoft/hcsshim/internal/hns/namespace.go
generated
vendored
@ -1,3 +1,5 @@
|
|||||||
|
//go:build windows
|
||||||
|
|
||||||
package hns
|
package hns
|
||||||
|
|
||||||
import (
|
import (
|
||||||
|
1
vendor/github.com/Microsoft/hcsshim/internal/interop/doc.go
generated
vendored
Normal file
1
vendor/github.com/Microsoft/hcsshim/internal/interop/doc.go
generated
vendored
Normal file
@ -0,0 +1 @@
|
|||||||
|
package interop
|
2
vendor/github.com/Microsoft/hcsshim/internal/interop/interop.go
generated
vendored
2
vendor/github.com/Microsoft/hcsshim/internal/interop/interop.go
generated
vendored
@ -1,3 +1,5 @@
|
|||||||
|
//go:build windows
|
||||||
|
|
||||||
package interop
|
package interop
|
||||||
|
|
||||||
import (
|
import (
|
||||||
|
8
vendor/github.com/Microsoft/hcsshim/internal/jobobject/doc.go
generated
vendored
Normal file
8
vendor/github.com/Microsoft/hcsshim/internal/jobobject/doc.go
generated
vendored
Normal file
@ -0,0 +1,8 @@
|
|||||||
|
// This package provides higher level constructs for the win32 job object API.
|
||||||
|
// Most of the core creation and management functions are already present in "golang.org/x/sys/windows"
|
||||||
|
// (CreateJobObject, AssignProcessToJobObject, etc.) as well as most of the limit information
|
||||||
|
// structs and associated limit flags. Whatever is not present from the job object API
|
||||||
|
// in golang.org/x/sys/windows is located in /internal/winapi.
|
||||||
|
//
|
||||||
|
// https://docs.microsoft.com/en-us/windows/win32/procthread/job-objects
|
||||||
|
package jobobject
|
2
vendor/github.com/Microsoft/hcsshim/internal/jobobject/iocp.go
generated
vendored
2
vendor/github.com/Microsoft/hcsshim/internal/jobobject/iocp.go
generated
vendored
@ -1,3 +1,5 @@
|
|||||||
|
//go:build windows
|
||||||
|
|
||||||
package jobobject
|
package jobobject
|
||||||
|
|
||||||
import (
|
import (
|
||||||
|
153
vendor/github.com/Microsoft/hcsshim/internal/jobobject/jobobject.go
generated
vendored
153
vendor/github.com/Microsoft/hcsshim/internal/jobobject/jobobject.go
generated
vendored
@ -1,10 +1,15 @@
|
|||||||
|
//go:build windows
|
||||||
|
|
||||||
package jobobject
|
package jobobject
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"context"
|
"context"
|
||||||
"errors"
|
"errors"
|
||||||
"fmt"
|
"fmt"
|
||||||
|
"os"
|
||||||
|
"path/filepath"
|
||||||
"sync"
|
"sync"
|
||||||
|
"sync/atomic"
|
||||||
"unsafe"
|
"unsafe"
|
||||||
|
|
||||||
"github.com/Microsoft/hcsshim/internal/queue"
|
"github.com/Microsoft/hcsshim/internal/queue"
|
||||||
@ -12,19 +17,14 @@ import (
|
|||||||
"golang.org/x/sys/windows"
|
"golang.org/x/sys/windows"
|
||||||
)
|
)
|
||||||
|
|
||||||
// This file provides higher level constructs for the win32 job object API.
|
|
||||||
// Most of the core creation and management functions are already present in "golang.org/x/sys/windows"
|
|
||||||
// (CreateJobObject, AssignProcessToJobObject, etc.) as well as most of the limit information
|
|
||||||
// structs and associated limit flags. Whatever is not present from the job object API
|
|
||||||
// in golang.org/x/sys/windows is located in /internal/winapi.
|
|
||||||
//
|
|
||||||
// https://docs.microsoft.com/en-us/windows/win32/procthread/job-objects
|
|
||||||
|
|
||||||
// JobObject is a high level wrapper around a Windows job object. Holds a handle to
|
// JobObject is a high level wrapper around a Windows job object. Holds a handle to
|
||||||
// the job, a queue to receive iocp notifications about the lifecycle
|
// the job, a queue to receive iocp notifications about the lifecycle
|
||||||
// of the job and a mutex for synchronized handle access.
|
// of the job and a mutex for synchronized handle access.
|
||||||
type JobObject struct {
|
type JobObject struct {
|
||||||
handle windows.Handle
|
handle windows.Handle
|
||||||
|
// All accesses to this MUST be done atomically except in `Open` as the object
|
||||||
|
// is being created in the function. 1 signifies that this job is currently a silo.
|
||||||
|
silo uint32
|
||||||
mq *queue.MessageQueue
|
mq *queue.MessageQueue
|
||||||
handleLock sync.RWMutex
|
handleLock sync.RWMutex
|
||||||
}
|
}
|
||||||
@ -56,6 +56,7 @@ const (
|
|||||||
var (
|
var (
|
||||||
ErrAlreadyClosed = errors.New("the handle has already been closed")
|
ErrAlreadyClosed = errors.New("the handle has already been closed")
|
||||||
ErrNotRegistered = errors.New("job is not registered to receive notifications")
|
ErrNotRegistered = errors.New("job is not registered to receive notifications")
|
||||||
|
ErrNotSilo = errors.New("job is not a silo")
|
||||||
)
|
)
|
||||||
|
|
||||||
// Options represents the set of configurable options when making or opening a job object.
|
// Options represents the set of configurable options when making or opening a job object.
|
||||||
@ -68,6 +69,9 @@ type Options struct {
|
|||||||
// `UseNTVariant` specifies if we should use the `Nt` variant of Open/CreateJobObject.
|
// `UseNTVariant` specifies if we should use the `Nt` variant of Open/CreateJobObject.
|
||||||
// Defaults to false.
|
// Defaults to false.
|
||||||
UseNTVariant bool
|
UseNTVariant bool
|
||||||
|
// `Silo` specifies to promote the job to a silo. This additionally sets the flag
|
||||||
|
// JOB_OBJECT_LIMIT_KILL_ON_JOB_CLOSE as it is required for the upgrade to complete.
|
||||||
|
Silo bool
|
||||||
// `IOTracking` enables tracking I/O statistics on the job object. More specifically this
|
// `IOTracking` enables tracking I/O statistics on the job object. More specifically this
|
||||||
// calls SetInformationJobObject with the JobObjectIoAttribution class.
|
// calls SetInformationJobObject with the JobObjectIoAttribution class.
|
||||||
EnableIOTracking bool
|
EnableIOTracking bool
|
||||||
@ -143,6 +147,16 @@ func Create(ctx context.Context, options *Options) (_ *JobObject, err error) {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
if options.Silo {
|
||||||
|
// This is a required setting for upgrading to a silo.
|
||||||
|
if err := job.SetTerminateOnLastHandleClose(); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
if err := job.PromoteToSilo(); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
return job, nil
|
return job, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -163,7 +177,7 @@ func Open(ctx context.Context, options *Options) (_ *JobObject, err error) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
var jobHandle windows.Handle
|
var jobHandle windows.Handle
|
||||||
if options != nil && options.UseNTVariant {
|
if options.UseNTVariant {
|
||||||
oa := winapi.ObjectAttributes{
|
oa := winapi.ObjectAttributes{
|
||||||
Length: unsafe.Sizeof(winapi.ObjectAttributes{}),
|
Length: unsafe.Sizeof(winapi.ObjectAttributes{}),
|
||||||
ObjectName: unicodeJobName,
|
ObjectName: unicodeJobName,
|
||||||
@ -174,7 +188,7 @@ func Open(ctx context.Context, options *Options) (_ *JobObject, err error) {
|
|||||||
return nil, winapi.RtlNtStatusToDosError(status)
|
return nil, winapi.RtlNtStatusToDosError(status)
|
||||||
}
|
}
|
||||||
} else {
|
} else {
|
||||||
jobHandle, err = winapi.OpenJobObject(winapi.JOB_OBJECT_ALL_ACCESS, false, unicodeJobName.Buffer)
|
jobHandle, err = winapi.OpenJobObject(winapi.JOB_OBJECT_ALL_ACCESS, 0, unicodeJobName.Buffer)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
@ -190,9 +204,13 @@ func Open(ctx context.Context, options *Options) (_ *JobObject, err error) {
|
|||||||
handle: jobHandle,
|
handle: jobHandle,
|
||||||
}
|
}
|
||||||
|
|
||||||
|
if isJobSilo(jobHandle) {
|
||||||
|
job.silo = 1
|
||||||
|
}
|
||||||
|
|
||||||
// If the IOCP we'll be using to receive messages for all jobs hasn't been
|
// If the IOCP we'll be using to receive messages for all jobs hasn't been
|
||||||
// created, create it and start polling.
|
// created, create it and start polling.
|
||||||
if options != nil && options.Notifications {
|
if options.Notifications {
|
||||||
mq, err := setupNotifications(ctx, job)
|
mq, err := setupNotifications(ctx, job)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
@ -450,6 +468,119 @@ func (job *JobObject) QueryStorageStats() (*winapi.JOBOBJECT_IO_ATTRIBUTION_INFO
|
|||||||
return &info, nil
|
return &info, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// ApplyFileBinding makes a file binding using the Bind Filter from target to root. If the job has
|
||||||
|
// not been upgraded to a silo this call will fail. The binding is only applied and visible for processes
|
||||||
|
// running in the job, any processes on the host or in another job will not be able to see the binding.
|
||||||
|
func (job *JobObject) ApplyFileBinding(root, target string, merged bool) error {
|
||||||
|
job.handleLock.RLock()
|
||||||
|
defer job.handleLock.RUnlock()
|
||||||
|
|
||||||
|
if job.handle == 0 {
|
||||||
|
return ErrAlreadyClosed
|
||||||
|
}
|
||||||
|
|
||||||
|
if !job.isSilo() {
|
||||||
|
return ErrNotSilo
|
||||||
|
}
|
||||||
|
|
||||||
|
// The parent directory needs to exist for the bind to work. MkdirAll stats and
|
||||||
|
// returns nil if the directory exists internally so we should be fine to mkdirall
|
||||||
|
// every time.
|
||||||
|
if err := os.MkdirAll(filepath.Dir(root), 0); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
rootPtr, err := windows.UTF16PtrFromString(root)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
targetPtr, err := windows.UTF16PtrFromString(target)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
flags := winapi.BINDFLT_FLAG_USE_CURRENT_SILO_MAPPING
|
||||||
|
if merged {
|
||||||
|
flags |= winapi.BINDFLT_FLAG_MERGED_BIND_MAPPING
|
||||||
|
}
|
||||||
|
|
||||||
|
if err := winapi.BfSetupFilter(
|
||||||
|
job.handle,
|
||||||
|
flags,
|
||||||
|
rootPtr,
|
||||||
|
targetPtr,
|
||||||
|
nil,
|
||||||
|
0,
|
||||||
|
); err != nil {
|
||||||
|
return fmt.Errorf("failed to bind target %q to root %q for job object: %w", target, root, err)
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// isJobSilo is a helper to determine if a job object that was opened is a silo. This should ONLY be called
|
||||||
|
// from `Open` and any callers in this package afterwards should use `job.isSilo()`
|
||||||
|
func isJobSilo(h windows.Handle) bool {
|
||||||
|
// None of the information from the structure that this info class expects will be used, this is just used as
|
||||||
|
// the call will fail if the job hasn't been upgraded to a silo so we can use this to tell when we open a job
|
||||||
|
// if it's a silo or not. Because none of the info matters simply define a dummy struct with the size that the call
|
||||||
|
// expects which is 16 bytes.
|
||||||
|
type isSiloObj struct {
|
||||||
|
_ [16]byte
|
||||||
|
}
|
||||||
|
var siloInfo isSiloObj
|
||||||
|
err := winapi.QueryInformationJobObject(
|
||||||
|
h,
|
||||||
|
winapi.JobObjectSiloBasicInformation,
|
||||||
|
unsafe.Pointer(&siloInfo),
|
||||||
|
uint32(unsafe.Sizeof(siloInfo)),
|
||||||
|
nil,
|
||||||
|
)
|
||||||
|
return err == nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// PromoteToSilo promotes a job object to a silo. There must be no running processess
|
||||||
|
// in the job for this to succeed. If the job is already a silo this is a no-op.
|
||||||
|
func (job *JobObject) PromoteToSilo() error {
|
||||||
|
job.handleLock.RLock()
|
||||||
|
defer job.handleLock.RUnlock()
|
||||||
|
|
||||||
|
if job.handle == 0 {
|
||||||
|
return ErrAlreadyClosed
|
||||||
|
}
|
||||||
|
|
||||||
|
if job.isSilo() {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
pids, err := job.Pids()
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
if len(pids) != 0 {
|
||||||
|
return fmt.Errorf("job cannot have running processes to be promoted to a silo, found %d running processes", len(pids))
|
||||||
|
}
|
||||||
|
|
||||||
|
_, err = windows.SetInformationJobObject(
|
||||||
|
job.handle,
|
||||||
|
winapi.JobObjectCreateSilo,
|
||||||
|
0,
|
||||||
|
0,
|
||||||
|
)
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("failed to promote job to silo: %w", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
atomic.StoreUint32(&job.silo, 1)
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// isSilo returns if the job object is a silo.
|
||||||
|
func (job *JobObject) isSilo() bool {
|
||||||
|
return atomic.LoadUint32(&job.silo) == 1
|
||||||
|
}
|
||||||
|
|
||||||
// QueryPrivateWorkingSet returns the private working set size for the job. This is calculated by adding up the
|
// QueryPrivateWorkingSet returns the private working set size for the job. This is calculated by adding up the
|
||||||
// private working set for every process running in the job.
|
// private working set for every process running in the job.
|
||||||
func (job *JobObject) QueryPrivateWorkingSet() (uint64, error) {
|
func (job *JobObject) QueryPrivateWorkingSet() (uint64, error) {
|
||||||
|
2
vendor/github.com/Microsoft/hcsshim/internal/jobobject/limits.go
generated
vendored
2
vendor/github.com/Microsoft/hcsshim/internal/jobobject/limits.go
generated
vendored
@ -1,3 +1,5 @@
|
|||||||
|
//go:build windows
|
||||||
|
|
||||||
package jobobject
|
package jobobject
|
||||||
|
|
||||||
import (
|
import (
|
||||||
|
116
vendor/github.com/Microsoft/hcsshim/internal/log/context.go
generated
vendored
Normal file
116
vendor/github.com/Microsoft/hcsshim/internal/log/context.go
generated
vendored
Normal file
@ -0,0 +1,116 @@
|
|||||||
|
package log
|
||||||
|
|
||||||
|
import (
|
||||||
|
"context"
|
||||||
|
|
||||||
|
"github.com/sirupsen/logrus"
|
||||||
|
"go.opencensus.io/trace"
|
||||||
|
)
|
||||||
|
|
||||||
|
type entryContextKeyType int
|
||||||
|
|
||||||
|
const _entryContextKey entryContextKeyType = iota
|
||||||
|
|
||||||
|
var (
|
||||||
|
// L is the default, blank logging entry. WithField and co. all return a copy
|
||||||
|
// of the original entry, so this will not leak fields between calls.
|
||||||
|
//
|
||||||
|
// Do NOT modify fields directly, as that will corrupt state for all users and
|
||||||
|
// is not thread safe.
|
||||||
|
// Instead, use `L.With*` or `L.Dup()`. Or `G(context.Background())`.
|
||||||
|
L = logrus.NewEntry(logrus.StandardLogger())
|
||||||
|
|
||||||
|
// G is an alias for GetEntry
|
||||||
|
G = GetEntry
|
||||||
|
|
||||||
|
// S is an alias for SetEntry
|
||||||
|
S = SetEntry
|
||||||
|
|
||||||
|
// U is an alias for UpdateContext
|
||||||
|
U = UpdateContext
|
||||||
|
)
|
||||||
|
|
||||||
|
// GetEntry returns a `logrus.Entry` stored in the context, if one exists.
|
||||||
|
// Otherwise, it returns a default entry that points to the current context.
|
||||||
|
//
|
||||||
|
// Note: if the a new entry is returned, it will reference the passed in context.
|
||||||
|
// However, existing contexts may be stored in parent contexts and additionally reference
|
||||||
|
// earlier contexts.
|
||||||
|
// Use `UpdateContext` to update the entry and context.
|
||||||
|
func GetEntry(ctx context.Context) *logrus.Entry {
|
||||||
|
entry := fromContext(ctx)
|
||||||
|
|
||||||
|
if entry == nil {
|
||||||
|
entry = L.WithContext(ctx)
|
||||||
|
}
|
||||||
|
|
||||||
|
return entry
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetEntry updates the log entry in the context with the provided fields, and
|
||||||
|
// returns both. It is equivlent to:
|
||||||
|
// entry := GetEntry(ctx).WithFields(fields)
|
||||||
|
// ctx = WithContext(ctx, entry)
|
||||||
|
//
|
||||||
|
// See WithContext for more information.
|
||||||
|
func SetEntry(ctx context.Context, fields logrus.Fields) (context.Context, *logrus.Entry) {
|
||||||
|
e := GetEntry(ctx)
|
||||||
|
if len(fields) > 0 {
|
||||||
|
e = e.WithFields(fields)
|
||||||
|
}
|
||||||
|
return WithContext(ctx, e)
|
||||||
|
}
|
||||||
|
|
||||||
|
// UpdateContext extracts the log entry from the context, and, if the entry's
|
||||||
|
// context points to a parent's of the current context, ands the entry
|
||||||
|
// to the most recent context. It is equivlent to:
|
||||||
|
// entry :=GetEntry(ctx)
|
||||||
|
// ctx = WithContext(ctx, entry)
|
||||||
|
//
|
||||||
|
// This allows the entry to reference the most recent context and any new
|
||||||
|
// values (such as span contexts) added to it.
|
||||||
|
//
|
||||||
|
// See WithContext for more information.
|
||||||
|
func UpdateContext(ctx context.Context) context.Context {
|
||||||
|
// there is no way to check its ctx (and not one of its parents) that contains `e`
|
||||||
|
// so, at a slight cost, force add `e` to the context
|
||||||
|
ctx, _ = WithContext(ctx, GetEntry(ctx))
|
||||||
|
return ctx
|
||||||
|
}
|
||||||
|
|
||||||
|
// WithContext returns a context that contains the provided log entry.
|
||||||
|
// The entry can be extracted with `GetEntry` (`G`)
|
||||||
|
//
|
||||||
|
// The entry in the context is a copy of `entry` (generated by `entry.WithContext`)
|
||||||
|
func WithContext(ctx context.Context, entry *logrus.Entry) (context.Context, *logrus.Entry) {
|
||||||
|
// regardless of the order, entry.Context != GetEntry(ctx)
|
||||||
|
// here, the returned entry will reference the supplied context
|
||||||
|
entry = entry.WithContext(ctx)
|
||||||
|
ctx = context.WithValue(ctx, _entryContextKey, entry)
|
||||||
|
|
||||||
|
return ctx, entry
|
||||||
|
}
|
||||||
|
|
||||||
|
// Copy extracts the tracing Span and logging entry from the src Context, if they
|
||||||
|
// exist, and adds them to the dst Context.
|
||||||
|
//
|
||||||
|
// This is useful to share tracing and logging between contexts, but not the
|
||||||
|
// cancellation. For example, if the src Context has been cancelled but cleanup
|
||||||
|
// operations triggered by the cancellation require a non-cancelled context to
|
||||||
|
// execute.
|
||||||
|
func Copy(dst context.Context, src context.Context) context.Context {
|
||||||
|
if s := trace.FromContext(src); s != nil {
|
||||||
|
dst = trace.NewContext(dst, s)
|
||||||
|
}
|
||||||
|
|
||||||
|
if e := fromContext(src); e != nil {
|
||||||
|
dst, _ = WithContext(dst, e)
|
||||||
|
}
|
||||||
|
|
||||||
|
return dst
|
||||||
|
}
|
||||||
|
|
||||||
|
func fromContext(ctx context.Context) *logrus.Entry {
|
||||||
|
e, _ := ctx.Value(_entryContextKey).(*logrus.Entry)
|
||||||
|
return e
|
||||||
|
}
|
23
vendor/github.com/Microsoft/hcsshim/internal/log/g.go
generated
vendored
23
vendor/github.com/Microsoft/hcsshim/internal/log/g.go
generated
vendored
@ -1,23 +0,0 @@
|
|||||||
package log
|
|
||||||
|
|
||||||
import (
|
|
||||||
"context"
|
|
||||||
|
|
||||||
"github.com/sirupsen/logrus"
|
|
||||||
"go.opencensus.io/trace"
|
|
||||||
)
|
|
||||||
|
|
||||||
// G returns a `logrus.Entry` with the `TraceID, SpanID` from `ctx` if `ctx`
|
|
||||||
// contains an OpenCensus `trace.Span`.
|
|
||||||
func G(ctx context.Context) *logrus.Entry {
|
|
||||||
span := trace.FromContext(ctx)
|
|
||||||
if span != nil {
|
|
||||||
sctx := span.SpanContext()
|
|
||||||
return logrus.WithFields(logrus.Fields{
|
|
||||||
"traceID": sctx.TraceID.String(),
|
|
||||||
"spanID": sctx.SpanID.String(),
|
|
||||||
// "parentSpanID": TODO: JTERRY75 - Try to convince OC to export this?
|
|
||||||
})
|
|
||||||
}
|
|
||||||
return logrus.NewEntry(logrus.StandardLogger())
|
|
||||||
}
|
|
45
vendor/github.com/Microsoft/hcsshim/internal/log/hook.go
generated
vendored
Normal file
45
vendor/github.com/Microsoft/hcsshim/internal/log/hook.go
generated
vendored
Normal file
@ -0,0 +1,45 @@
|
|||||||
|
package log
|
||||||
|
|
||||||
|
import (
|
||||||
|
"github.com/Microsoft/hcsshim/internal/logfields"
|
||||||
|
"github.com/sirupsen/logrus"
|
||||||
|
"go.opencensus.io/trace"
|
||||||
|
)
|
||||||
|
|
||||||
|
// Hook serves to intercept and format `logrus.Entry`s before they are passed
|
||||||
|
// to the ETW hook.
|
||||||
|
//
|
||||||
|
// The containerd shim discards the (formatted) logrus output, and outputs only via ETW.
|
||||||
|
// The Linux GCS outputs logrus entries over stdout, which is consumed by the shim and
|
||||||
|
// then re-output via the ETW hook.
|
||||||
|
type Hook struct{}
|
||||||
|
|
||||||
|
var _ logrus.Hook = &Hook{}
|
||||||
|
|
||||||
|
func NewHook() *Hook {
|
||||||
|
return &Hook{}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (h *Hook) Levels() []logrus.Level {
|
||||||
|
return logrus.AllLevels
|
||||||
|
}
|
||||||
|
|
||||||
|
func (h *Hook) Fire(e *logrus.Entry) (err error) {
|
||||||
|
h.addSpanContext(e)
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (h *Hook) addSpanContext(e *logrus.Entry) {
|
||||||
|
ctx := e.Context
|
||||||
|
if ctx == nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
span := trace.FromContext(ctx)
|
||||||
|
if span == nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
sctx := span.SpanContext()
|
||||||
|
e.Data[logfields.TraceID] = sctx.TraceID.String()
|
||||||
|
e.Data[logfields.SpanID] = sctx.SpanID.String()
|
||||||
|
}
|
194
vendor/github.com/Microsoft/hcsshim/internal/log/scrub.go
generated
vendored
Normal file
194
vendor/github.com/Microsoft/hcsshim/internal/log/scrub.go
generated
vendored
Normal file
@ -0,0 +1,194 @@
|
|||||||
|
package log
|
||||||
|
|
||||||
|
import (
|
||||||
|
"bytes"
|
||||||
|
"encoding/json"
|
||||||
|
"errors"
|
||||||
|
"strings"
|
||||||
|
"sync/atomic"
|
||||||
|
|
||||||
|
hcsschema "github.com/Microsoft/hcsshim/internal/hcs/schema2"
|
||||||
|
)
|
||||||
|
|
||||||
|
// This package scrubs objects of potentially sensitive information to pass to logging
|
||||||
|
|
||||||
|
type genMap = map[string]interface{}
|
||||||
|
type scrubberFunc func(genMap) error
|
||||||
|
|
||||||
|
const _scrubbedReplacement = "<scrubbed>"
|
||||||
|
|
||||||
|
var (
|
||||||
|
ErrUnknownType = errors.New("encoded object is of unknown type")
|
||||||
|
|
||||||
|
// case sensitive keywords, so "env" is not a substring on "Environment"
|
||||||
|
_scrubKeywords = [][]byte{[]byte("env"), []byte("Environment")}
|
||||||
|
|
||||||
|
_scrub int32
|
||||||
|
)
|
||||||
|
|
||||||
|
// SetScrubbing enables scrubbing
|
||||||
|
func SetScrubbing(enable bool) {
|
||||||
|
v := int32(0) // cant convert from bool to int32 directly
|
||||||
|
if enable {
|
||||||
|
v = 1
|
||||||
|
}
|
||||||
|
atomic.StoreInt32(&_scrub, v)
|
||||||
|
}
|
||||||
|
|
||||||
|
// IsScrubbingEnabled checks if scrubbing is enabled
|
||||||
|
func IsScrubbingEnabled() bool {
|
||||||
|
v := atomic.LoadInt32(&_scrub)
|
||||||
|
return v != 0
|
||||||
|
}
|
||||||
|
|
||||||
|
// ScrubProcessParameters scrubs HCS Create Process requests with config parameters of
|
||||||
|
// type internal/hcs/schema2.ScrubProcessParameters (aka hcsshema.ScrubProcessParameters)
|
||||||
|
func ScrubProcessParameters(s string) (string, error) {
|
||||||
|
// todo: deal with v1 ProcessConfig
|
||||||
|
b := []byte(s)
|
||||||
|
if !IsScrubbingEnabled() || !hasKeywords(b) || !json.Valid(b) {
|
||||||
|
return s, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
pp := hcsschema.ProcessParameters{}
|
||||||
|
if err := json.Unmarshal(b, &pp); err != nil {
|
||||||
|
return "", err
|
||||||
|
}
|
||||||
|
pp.Environment = map[string]string{_scrubbedReplacement: _scrubbedReplacement}
|
||||||
|
|
||||||
|
buf := bytes.NewBuffer(b[:0])
|
||||||
|
if err := encode(buf, pp); err != nil {
|
||||||
|
return "", err
|
||||||
|
}
|
||||||
|
return strings.TrimSpace(buf.String()), nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// ScrubBridgeCreate scrubs requests sent over the bridge of type
|
||||||
|
// internal/gcs/protocol.containerCreate wrapping an internal/hcsoci.linuxHostedSystem
|
||||||
|
func ScrubBridgeCreate(b []byte) ([]byte, error) {
|
||||||
|
return scrubBytes(b, scrubBridgeCreate)
|
||||||
|
}
|
||||||
|
|
||||||
|
func scrubBridgeCreate(m genMap) error {
|
||||||
|
if !isRequestBase(m) {
|
||||||
|
return ErrUnknownType
|
||||||
|
}
|
||||||
|
if ss, ok := m["ContainerConfig"]; ok {
|
||||||
|
// ContainerConfig is a json encoded struct passed as a regular string field
|
||||||
|
s, ok := ss.(string)
|
||||||
|
if !ok {
|
||||||
|
return ErrUnknownType
|
||||||
|
}
|
||||||
|
b, err := scrubBytes([]byte(s), scrubLinuxHostedSystem)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
m["ContainerConfig"] = string(b)
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
return ErrUnknownType
|
||||||
|
}
|
||||||
|
|
||||||
|
func scrubLinuxHostedSystem(m genMap) error {
|
||||||
|
if m, ok := index(m, "OciSpecification"); ok {
|
||||||
|
if _, ok := m["annotations"]; ok {
|
||||||
|
m["annotations"] = map[string]string{_scrubbedReplacement: _scrubbedReplacement}
|
||||||
|
}
|
||||||
|
if m, ok := index(m, "process"); ok {
|
||||||
|
if _, ok := m["env"]; ok {
|
||||||
|
m["env"] = []string{_scrubbedReplacement}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return ErrUnknownType
|
||||||
|
}
|
||||||
|
|
||||||
|
// ScrubBridgeExecProcess scrubs requests sent over the bridge of type
|
||||||
|
// internal/gcs/protocol.containerExecuteProcess
|
||||||
|
func ScrubBridgeExecProcess(b []byte) ([]byte, error) {
|
||||||
|
return scrubBytes(b, scrubExecuteProcess)
|
||||||
|
}
|
||||||
|
|
||||||
|
func scrubExecuteProcess(m genMap) error {
|
||||||
|
if !isRequestBase(m) {
|
||||||
|
return ErrUnknownType
|
||||||
|
}
|
||||||
|
if m, ok := index(m, "Settings"); ok {
|
||||||
|
if ss, ok := m["ProcessParameters"]; ok {
|
||||||
|
// ProcessParameters is a json encoded struct passed as a regular sting field
|
||||||
|
s, ok := ss.(string)
|
||||||
|
if !ok {
|
||||||
|
return ErrUnknownType
|
||||||
|
}
|
||||||
|
|
||||||
|
s, err := ScrubProcessParameters(s)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
m["ProcessParameters"] = s
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return ErrUnknownType
|
||||||
|
}
|
||||||
|
|
||||||
|
func scrubBytes(b []byte, scrub scrubberFunc) ([]byte, error) {
|
||||||
|
if !IsScrubbingEnabled() || !hasKeywords(b) || !json.Valid(b) {
|
||||||
|
return b, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
m := make(genMap)
|
||||||
|
if err := json.Unmarshal(b, &m); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
// could use regexp, but if the env strings contain braces, the regexp fails
|
||||||
|
// parsing into individual structs would require access to private structs
|
||||||
|
if err := scrub(m); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
buf := &bytes.Buffer{}
|
||||||
|
if err := encode(buf, m); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
return bytes.TrimSpace(buf.Bytes()), nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func encode(buf *bytes.Buffer, v interface{}) error {
|
||||||
|
enc := json.NewEncoder(buf)
|
||||||
|
enc.SetEscapeHTML(false)
|
||||||
|
if err := enc.Encode(v); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func isRequestBase(m genMap) bool {
|
||||||
|
// neither of these are (currently) `omitempty`
|
||||||
|
_, a := m["ActivityId"]
|
||||||
|
_, c := m["ContainerId"]
|
||||||
|
return a && c
|
||||||
|
}
|
||||||
|
|
||||||
|
// combination `m, ok := m[s]` and `m, ok := m.(genMap)`
|
||||||
|
func index(m genMap, s string) (genMap, bool) {
|
||||||
|
if m, ok := m[s]; ok {
|
||||||
|
mm, ok := m.(genMap)
|
||||||
|
return mm, ok
|
||||||
|
}
|
||||||
|
|
||||||
|
return m, false
|
||||||
|
}
|
||||||
|
|
||||||
|
func hasKeywords(b []byte) bool {
|
||||||
|
for _, bb := range _scrubKeywords {
|
||||||
|
if bytes.Contains(b, bb) {
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return false
|
||||||
|
}
|
35
vendor/github.com/Microsoft/hcsshim/internal/logfields/fields.go
generated
vendored
35
vendor/github.com/Microsoft/hcsshim/internal/logfields/fields.go
generated
vendored
@ -3,21 +3,44 @@ package logfields
|
|||||||
const (
|
const (
|
||||||
// Identifiers
|
// Identifiers
|
||||||
|
|
||||||
|
Name = "name"
|
||||||
|
Namespace = "namespace"
|
||||||
|
Operation = "operation"
|
||||||
|
|
||||||
|
ID = "id"
|
||||||
|
SandboxID = "sid"
|
||||||
ContainerID = "cid"
|
ContainerID = "cid"
|
||||||
UVMID = "uvm-id"
|
ExecID = "eid"
|
||||||
ProcessID = "pid"
|
ProcessID = "pid"
|
||||||
|
TaskID = "tid"
|
||||||
|
UVMID = "uvm-id"
|
||||||
|
|
||||||
|
// networking and IO
|
||||||
|
|
||||||
|
File = "file"
|
||||||
|
Path = "path"
|
||||||
|
Bytes = "bytes"
|
||||||
|
Pipe = "pipe"
|
||||||
|
|
||||||
// Common Misc
|
// Common Misc
|
||||||
|
|
||||||
// Timeout represents an operation timeout.
|
Attempt = "attemptNo"
|
||||||
Timeout = "timeout"
|
|
||||||
JSON = "json"
|
JSON = "json"
|
||||||
|
|
||||||
|
// Time
|
||||||
|
|
||||||
|
StartTime = "startTime"
|
||||||
|
EndTime = "endTime"
|
||||||
|
Duration = "duration"
|
||||||
|
Timeout = "timeout"
|
||||||
|
|
||||||
// Keys/values
|
// Keys/values
|
||||||
|
|
||||||
Field = "field"
|
Field = "field"
|
||||||
|
Key = "key"
|
||||||
OCIAnnotation = "oci-annotation"
|
OCIAnnotation = "oci-annotation"
|
||||||
Value = "value"
|
Value = "value"
|
||||||
|
Options = "options"
|
||||||
|
|
||||||
// Golang type's
|
// Golang type's
|
||||||
|
|
||||||
@ -29,4 +52,10 @@ const (
|
|||||||
// runhcs
|
// runhcs
|
||||||
|
|
||||||
VMShimOperation = "vmshim-op"
|
VMShimOperation = "vmshim-op"
|
||||||
|
|
||||||
|
// logging and tracing
|
||||||
|
|
||||||
|
TraceID = "traceID"
|
||||||
|
SpanID = "spanID"
|
||||||
|
ParentSpanID = "parentSpanID"
|
||||||
)
|
)
|
||||||
|
316
vendor/github.com/Microsoft/hcsshim/internal/memory/pool.go
generated
vendored
Normal file
316
vendor/github.com/Microsoft/hcsshim/internal/memory/pool.go
generated
vendored
Normal file
@ -0,0 +1,316 @@
|
|||||||
|
package memory
|
||||||
|
|
||||||
|
import (
|
||||||
|
"github.com/pkg/errors"
|
||||||
|
)
|
||||||
|
|
||||||
|
const (
|
||||||
|
minimumClassSize = MiB
|
||||||
|
maximumClassSize = 4 * GiB
|
||||||
|
memoryClassNumber = 7
|
||||||
|
)
|
||||||
|
|
||||||
|
var (
|
||||||
|
ErrInvalidMemoryClass = errors.New("invalid memory class")
|
||||||
|
ErrEarlyMerge = errors.New("not all children have been freed")
|
||||||
|
ErrEmptyPoolOperation = errors.New("operation on empty pool")
|
||||||
|
)
|
||||||
|
|
||||||
|
// GetMemoryClassType returns the minimum memory class type that can hold a device of
|
||||||
|
// a given size. The smallest class is 1MB and the largest one is 4GB with 2 bit offset
|
||||||
|
// intervals in between, for a total of 7 different classes. This function does not
|
||||||
|
// do a validity check
|
||||||
|
func GetMemoryClassType(s uint64) classType {
|
||||||
|
s = (s - 1) >> 20
|
||||||
|
memCls := uint32(0)
|
||||||
|
for s > 0 {
|
||||||
|
s = s >> 2
|
||||||
|
memCls++
|
||||||
|
}
|
||||||
|
return classType(memCls)
|
||||||
|
}
|
||||||
|
|
||||||
|
// GetMemoryClassSize returns size in bytes for a given memory class
|
||||||
|
func GetMemoryClassSize(memCls classType) (uint64, error) {
|
||||||
|
if memCls >= memoryClassNumber {
|
||||||
|
return 0, ErrInvalidMemoryClass
|
||||||
|
}
|
||||||
|
return minimumClassSize << (2 * memCls), nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// region represents a contiguous memory block
|
||||||
|
type region struct {
|
||||||
|
// parent region that has been split into 4
|
||||||
|
parent *region
|
||||||
|
class classType
|
||||||
|
// offset represents offset in bytes
|
||||||
|
offset uint64
|
||||||
|
}
|
||||||
|
|
||||||
|
// memoryPool tracks free and busy (used) memory regions
|
||||||
|
type memoryPool struct {
|
||||||
|
free map[uint64]*region
|
||||||
|
busy map[uint64]*region
|
||||||
|
}
|
||||||
|
|
||||||
|
// PoolAllocator implements a memory allocation strategy similar to buddy-malloc https://github.com/evanw/buddy-malloc/blob/master/buddy-malloc.c
|
||||||
|
// We borrow the idea of spanning a tree of fixed size regions on top of a contiguous memory
|
||||||
|
// space.
|
||||||
|
//
|
||||||
|
// There are a total of 7 different region sizes that can be allocated, with the smallest
|
||||||
|
// being 1MB and the largest 4GB (the default maximum size of a Virtual PMem device).
|
||||||
|
//
|
||||||
|
// For efficiency and to reduce fragmentation an entire region is allocated when requested.
|
||||||
|
// When there's no available region of requested size, we try to allocate more memory for
|
||||||
|
// this particular size by splitting the next available larger region into smaller ones, e.g.
|
||||||
|
// if there's no region available for size class 0, we try splitting a region from class 1,
|
||||||
|
// then class 2 etc, until we are able to do so or hit the upper limit.
|
||||||
|
type PoolAllocator struct {
|
||||||
|
pools [memoryClassNumber]*memoryPool
|
||||||
|
}
|
||||||
|
|
||||||
|
var _ MappedRegion = ®ion{}
|
||||||
|
var _ Allocator = &PoolAllocator{}
|
||||||
|
|
||||||
|
func (r *region) Offset() uint64 {
|
||||||
|
return r.offset
|
||||||
|
}
|
||||||
|
|
||||||
|
func (r *region) Size() uint64 {
|
||||||
|
sz, err := GetMemoryClassSize(r.class)
|
||||||
|
if err != nil {
|
||||||
|
panic(err)
|
||||||
|
}
|
||||||
|
return sz
|
||||||
|
}
|
||||||
|
|
||||||
|
func (r *region) Type() classType {
|
||||||
|
return r.class
|
||||||
|
}
|
||||||
|
|
||||||
|
func newEmptyMemoryPool() *memoryPool {
|
||||||
|
return &memoryPool{
|
||||||
|
free: make(map[uint64]*region),
|
||||||
|
busy: make(map[uint64]*region),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func NewPoolMemoryAllocator() PoolAllocator {
|
||||||
|
pa := PoolAllocator{}
|
||||||
|
p := newEmptyMemoryPool()
|
||||||
|
// by default we allocate a single region with maximum possible size (class type)
|
||||||
|
p.free[0] = ®ion{
|
||||||
|
class: memoryClassNumber - 1,
|
||||||
|
offset: 0,
|
||||||
|
}
|
||||||
|
pa.pools[memoryClassNumber-1] = p
|
||||||
|
return pa
|
||||||
|
}
|
||||||
|
|
||||||
|
// Allocate checks memory region pool for the given `size` and returns a free region with
|
||||||
|
// minimal offset, if none available tries expanding matched memory pool.
|
||||||
|
//
|
||||||
|
// Internally it's done via moving a region from free pool into a busy pool
|
||||||
|
func (pa *PoolAllocator) Allocate(size uint64) (MappedRegion, error) {
|
||||||
|
memCls := GetMemoryClassType(size)
|
||||||
|
if memCls >= memoryClassNumber {
|
||||||
|
return nil, ErrInvalidMemoryClass
|
||||||
|
}
|
||||||
|
|
||||||
|
// find region with the smallest offset
|
||||||
|
nextCls, nextOffset, err := pa.findNextOffset(memCls)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
// this means that there are no more regions for the current class, try expanding
|
||||||
|
if nextCls != memCls {
|
||||||
|
if err := pa.split(memCls); err != nil {
|
||||||
|
if err == ErrInvalidMemoryClass {
|
||||||
|
return nil, ErrNotEnoughSpace
|
||||||
|
}
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if err := pa.markBusy(memCls, nextOffset); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
// by this point memory pool for memCls should have been created,
|
||||||
|
// either prior or during split call
|
||||||
|
if r := pa.pools[memCls].busy[nextOffset]; r != nil {
|
||||||
|
return r, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil, ErrNotEnoughSpace
|
||||||
|
}
|
||||||
|
|
||||||
|
// Release marks a memory region of class `memCls` and offset `offset` as free and tries to merge smaller regions into
|
||||||
|
// a bigger one
|
||||||
|
func (pa *PoolAllocator) Release(reg MappedRegion) error {
|
||||||
|
mp := pa.pools[reg.Type()]
|
||||||
|
if mp == nil {
|
||||||
|
return ErrEmptyPoolOperation
|
||||||
|
}
|
||||||
|
|
||||||
|
err := pa.markFree(reg.Type(), reg.Offset())
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
n := mp.free[reg.Offset()]
|
||||||
|
if n == nil {
|
||||||
|
return ErrNotAllocated
|
||||||
|
}
|
||||||
|
if err := pa.merge(n.parent); err != nil {
|
||||||
|
if err != ErrEarlyMerge {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// findNextOffset finds next region location for a given memCls
|
||||||
|
func (pa *PoolAllocator) findNextOffset(memCls classType) (classType, uint64, error) {
|
||||||
|
for mc := memCls; mc < memoryClassNumber; mc++ {
|
||||||
|
pi := pa.pools[mc]
|
||||||
|
if pi == nil || len(pi.free) == 0 {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
target := uint64(maximumClassSize)
|
||||||
|
for offset := range pi.free {
|
||||||
|
if offset < target {
|
||||||
|
target = offset
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return mc, target, nil
|
||||||
|
}
|
||||||
|
return 0, 0, ErrNotEnoughSpace
|
||||||
|
}
|
||||||
|
|
||||||
|
// split tries to recursively split a bigger memory region into smaller ones until it succeeds or hits the upper limit
|
||||||
|
func (pa *PoolAllocator) split(clsType classType) error {
|
||||||
|
nextClsType := clsType + 1
|
||||||
|
if nextClsType >= memoryClassNumber {
|
||||||
|
return ErrInvalidMemoryClass
|
||||||
|
}
|
||||||
|
|
||||||
|
nextPool := pa.pools[nextClsType]
|
||||||
|
if nextPool == nil {
|
||||||
|
nextPool = newEmptyMemoryPool()
|
||||||
|
pa.pools[nextClsType] = nextPool
|
||||||
|
}
|
||||||
|
|
||||||
|
cls, offset, err := pa.findNextOffset(nextClsType)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
// not enough memory in the next class, try to recursively expand
|
||||||
|
if cls != nextClsType {
|
||||||
|
if err := pa.split(nextClsType); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if err := pa.markBusy(nextClsType, offset); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
// memCls validity has been checked already, we can ignore the error
|
||||||
|
clsSize, _ := GetMemoryClassSize(clsType)
|
||||||
|
|
||||||
|
nextReg := nextPool.busy[offset]
|
||||||
|
if nextReg == nil {
|
||||||
|
return ErrNotAllocated
|
||||||
|
}
|
||||||
|
|
||||||
|
// expand memCls
|
||||||
|
cp := pa.pools[clsType]
|
||||||
|
if cp == nil {
|
||||||
|
cp = newEmptyMemoryPool()
|
||||||
|
pa.pools[clsType] = cp
|
||||||
|
}
|
||||||
|
// create 4 smaller regions
|
||||||
|
for i := uint64(0); i < 4; i++ {
|
||||||
|
offset := nextReg.offset + i*clsSize
|
||||||
|
reg := ®ion{
|
||||||
|
parent: nextReg,
|
||||||
|
class: clsType,
|
||||||
|
offset: offset,
|
||||||
|
}
|
||||||
|
cp.free[offset] = reg
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (pa *PoolAllocator) merge(parent *region) error {
|
||||||
|
// nothing to merge
|
||||||
|
if parent == nil {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
childCls := parent.class - 1
|
||||||
|
childPool := pa.pools[childCls]
|
||||||
|
// no child nodes to merge, try to merge parent
|
||||||
|
if childPool == nil {
|
||||||
|
return pa.merge(parent.parent)
|
||||||
|
}
|
||||||
|
|
||||||
|
childSize, err := GetMemoryClassSize(childCls)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
// check if all the child nodes are free
|
||||||
|
var children []*region
|
||||||
|
for i := uint64(0); i < 4; i++ {
|
||||||
|
child, free := childPool.free[parent.offset+i*childSize]
|
||||||
|
if !free {
|
||||||
|
return ErrEarlyMerge
|
||||||
|
}
|
||||||
|
children = append(children, child)
|
||||||
|
}
|
||||||
|
|
||||||
|
// at this point all the child nodes will be free and we can merge
|
||||||
|
for _, child := range children {
|
||||||
|
delete(childPool.free, child.offset)
|
||||||
|
}
|
||||||
|
|
||||||
|
if err := pa.markFree(parent.class, parent.offset); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
return pa.merge(parent.parent)
|
||||||
|
}
|
||||||
|
|
||||||
|
// markFree internally moves a region with `offset` from busy to free map
|
||||||
|
func (pa *PoolAllocator) markFree(memCls classType, offset uint64) error {
|
||||||
|
clsPool := pa.pools[memCls]
|
||||||
|
if clsPool == nil {
|
||||||
|
return ErrEmptyPoolOperation
|
||||||
|
}
|
||||||
|
|
||||||
|
if reg, exists := clsPool.busy[offset]; exists {
|
||||||
|
clsPool.free[offset] = reg
|
||||||
|
delete(clsPool.busy, offset)
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
return ErrNotAllocated
|
||||||
|
}
|
||||||
|
|
||||||
|
// markBusy internally moves a region with `offset` from free to busy map
|
||||||
|
func (pa *PoolAllocator) markBusy(memCls classType, offset uint64) error {
|
||||||
|
clsPool := pa.pools[memCls]
|
||||||
|
if clsPool == nil {
|
||||||
|
return ErrEmptyPoolOperation
|
||||||
|
}
|
||||||
|
|
||||||
|
if reg, exists := clsPool.free[offset]; exists {
|
||||||
|
clsPool.busy[offset] = reg
|
||||||
|
delete(clsPool.free, offset)
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
return ErrNotAllocated
|
||||||
|
}
|
28
vendor/github.com/Microsoft/hcsshim/internal/memory/types.go
generated
vendored
Normal file
28
vendor/github.com/Microsoft/hcsshim/internal/memory/types.go
generated
vendored
Normal file
@ -0,0 +1,28 @@
|
|||||||
|
package memory
|
||||||
|
|
||||||
|
import "github.com/pkg/errors"
|
||||||
|
|
||||||
|
type classType uint32
|
||||||
|
|
||||||
|
const (
|
||||||
|
MiB = 1024 * 1024
|
||||||
|
GiB = 1024 * MiB
|
||||||
|
)
|
||||||
|
|
||||||
|
var (
|
||||||
|
ErrNotEnoughSpace = errors.New("not enough space")
|
||||||
|
ErrNotAllocated = errors.New("no memory allocated at the given offset")
|
||||||
|
)
|
||||||
|
|
||||||
|
// MappedRegion represents a memory block with an offset
|
||||||
|
type MappedRegion interface {
|
||||||
|
Offset() uint64
|
||||||
|
Size() uint64
|
||||||
|
Type() classType
|
||||||
|
}
|
||||||
|
|
||||||
|
// Allocator is an interface for memory allocation
|
||||||
|
type Allocator interface {
|
||||||
|
Allocate(uint64) (MappedRegion, error)
|
||||||
|
Release(MappedRegion) error
|
||||||
|
}
|
31
vendor/github.com/Microsoft/hcsshim/internal/oc/span.go
generated
vendored
31
vendor/github.com/Microsoft/hcsshim/internal/oc/span.go
generated
vendored
@ -1,9 +1,14 @@
|
|||||||
package oc
|
package oc
|
||||||
|
|
||||||
import (
|
import (
|
||||||
|
"context"
|
||||||
|
|
||||||
|
"github.com/Microsoft/hcsshim/internal/log"
|
||||||
"go.opencensus.io/trace"
|
"go.opencensus.io/trace"
|
||||||
)
|
)
|
||||||
|
|
||||||
|
var DefaultSampler = trace.AlwaysSample()
|
||||||
|
|
||||||
// SetSpanStatus sets `span.SetStatus` to the proper status depending on `err`. If
|
// SetSpanStatus sets `span.SetStatus` to the proper status depending on `err`. If
|
||||||
// `err` is `nil` assumes `trace.StatusCodeOk`.
|
// `err` is `nil` assumes `trace.StatusCodeOk`.
|
||||||
func SetSpanStatus(span *trace.Span, err error) {
|
func SetSpanStatus(span *trace.Span, err error) {
|
||||||
@ -15,3 +20,29 @@ func SetSpanStatus(span *trace.Span, err error) {
|
|||||||
}
|
}
|
||||||
span.SetStatus(status)
|
span.SetStatus(status)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// StartSpan wraps "go.opencensus.io/trace".StartSpan, but, if the span is sampling,
|
||||||
|
// adds a log entry to the context that points to the newly created span.
|
||||||
|
func StartSpan(ctx context.Context, name string, o ...trace.StartOption) (context.Context, *trace.Span) {
|
||||||
|
ctx, s := trace.StartSpan(ctx, name, o...)
|
||||||
|
return update(ctx, s)
|
||||||
|
}
|
||||||
|
|
||||||
|
// StartSpanWithRemoteParent wraps "go.opencensus.io/trace".StartSpanWithRemoteParent.
|
||||||
|
//
|
||||||
|
// See StartSpan for more information.
|
||||||
|
func StartSpanWithRemoteParent(ctx context.Context, name string, parent trace.SpanContext, o ...trace.StartOption) (context.Context, *trace.Span) {
|
||||||
|
ctx, s := trace.StartSpanWithRemoteParent(ctx, name, parent, o...)
|
||||||
|
return update(ctx, s)
|
||||||
|
}
|
||||||
|
|
||||||
|
func update(ctx context.Context, s *trace.Span) (context.Context, *trace.Span) {
|
||||||
|
if s.IsRecordingEvents() {
|
||||||
|
ctx = log.UpdateContext(ctx)
|
||||||
|
}
|
||||||
|
|
||||||
|
return ctx, s
|
||||||
|
}
|
||||||
|
|
||||||
|
var WithServerSpanKind = trace.WithSpanKind(trace.SpanKindServer)
|
||||||
|
var WithClientSpanKind = trace.WithSpanKind(trace.SpanKindClient)
|
||||||
|
56
vendor/github.com/Microsoft/hcsshim/internal/protocol/guestrequest/types.go
generated
vendored
Normal file
56
vendor/github.com/Microsoft/hcsshim/internal/protocol/guestrequest/types.go
generated
vendored
Normal file
@ -0,0 +1,56 @@
|
|||||||
|
package guestrequest
|
||||||
|
|
||||||
|
// These are constants for v2 schema modify requests.
|
||||||
|
|
||||||
|
type RequestType string
|
||||||
|
type ResourceType string
|
||||||
|
|
||||||
|
// RequestType const
|
||||||
|
const (
|
||||||
|
RequestTypeAdd RequestType = "Add"
|
||||||
|
RequestTypeRemove RequestType = "Remove"
|
||||||
|
RequestTypePreAdd RequestType = "PreAdd" // For networking
|
||||||
|
RequestTypeUpdate RequestType = "Update"
|
||||||
|
)
|
||||||
|
|
||||||
|
type SignalValueWCOW string
|
||||||
|
|
||||||
|
const (
|
||||||
|
SignalValueWCOWCtrlC SignalValueWCOW = "CtrlC"
|
||||||
|
SignalValueWCOWCtrlBreak SignalValueWCOW = "CtrlBreak"
|
||||||
|
SignalValueWCOWCtrlClose SignalValueWCOW = "CtrlClose"
|
||||||
|
SignalValueWCOWCtrlLogOff SignalValueWCOW = "CtrlLogOff"
|
||||||
|
SignalValueWCOWCtrlShutdown SignalValueWCOW = "CtrlShutdown"
|
||||||
|
)
|
||||||
|
|
||||||
|
// ModificationRequest is for modify commands passed to the guest.
|
||||||
|
type ModificationRequest struct {
|
||||||
|
RequestType RequestType `json:"RequestType,omitempty"`
|
||||||
|
ResourceType ResourceType `json:"ResourceType,omitempty"`
|
||||||
|
Settings interface{} `json:"Settings,omitempty"`
|
||||||
|
}
|
||||||
|
|
||||||
|
type NetworkModifyRequest struct {
|
||||||
|
AdapterId string `json:"AdapterId,omitempty"` //nolint:stylecheck
|
||||||
|
RequestType RequestType `json:"RequestType,omitempty"`
|
||||||
|
Settings interface{} `json:"Settings,omitempty"`
|
||||||
|
}
|
||||||
|
|
||||||
|
type RS4NetworkModifyRequest struct {
|
||||||
|
AdapterInstanceId string `json:"AdapterInstanceId,omitempty"` //nolint:stylecheck
|
||||||
|
RequestType RequestType `json:"RequestType,omitempty"`
|
||||||
|
Settings interface{} `json:"Settings,omitempty"`
|
||||||
|
}
|
||||||
|
|
||||||
|
var (
|
||||||
|
// V5 GUIDs for SCSI controllers
|
||||||
|
// These GUIDs are created with namespace GUID "d422512d-2bf2-4752-809d-7b82b5fcb1b4"
|
||||||
|
// and index as names. For example, first GUID is created like this:
|
||||||
|
// guid.NewV5("d422512d-2bf2-4752-809d-7b82b5fcb1b4", []byte("0"))
|
||||||
|
ScsiControllerGuids = []string{
|
||||||
|
"df6d0690-79e5-55b6-a5ec-c1e2f77f580a",
|
||||||
|
"0110f83b-de10-5172-a266-78bca56bf50a",
|
||||||
|
"b5d2d8d4-3a75-51bf-945b-3444dc6b8579",
|
||||||
|
"305891a9-b251-5dfe-91a2-c25d9212275b",
|
||||||
|
}
|
||||||
|
)
|
1
vendor/github.com/Microsoft/hcsshim/internal/regstate/doc.go
generated
vendored
Normal file
1
vendor/github.com/Microsoft/hcsshim/internal/regstate/doc.go
generated
vendored
Normal file
@ -0,0 +1 @@
|
|||||||
|
package regstate
|
2
vendor/github.com/Microsoft/hcsshim/internal/regstate/regstate.go
generated
vendored
2
vendor/github.com/Microsoft/hcsshim/internal/regstate/regstate.go
generated
vendored
@ -1,3 +1,5 @@
|
|||||||
|
//go:build windows
|
||||||
|
|
||||||
package regstate
|
package regstate
|
||||||
|
|
||||||
import (
|
import (
|
||||||
|
2
vendor/github.com/Microsoft/hcsshim/internal/runhcs/container.go
generated
vendored
2
vendor/github.com/Microsoft/hcsshim/internal/runhcs/container.go
generated
vendored
@ -1,3 +1,5 @@
|
|||||||
|
//go:build windows
|
||||||
|
|
||||||
package runhcs
|
package runhcs
|
||||||
|
|
||||||
import (
|
import (
|
||||||
|
2
vendor/github.com/Microsoft/hcsshim/internal/runhcs/vm.go
generated
vendored
2
vendor/github.com/Microsoft/hcsshim/internal/runhcs/vm.go
generated
vendored
@ -1,3 +1,5 @@
|
|||||||
|
//go:build windows
|
||||||
|
|
||||||
package runhcs
|
package runhcs
|
||||||
|
|
||||||
import (
|
import (
|
||||||
|
1
vendor/github.com/Microsoft/hcsshim/internal/safefile/do.go
generated
vendored
Normal file
1
vendor/github.com/Microsoft/hcsshim/internal/safefile/do.go
generated
vendored
Normal file
@ -0,0 +1 @@
|
|||||||
|
package safefile
|
Some files were not shown because too many files have changed in this diff Show More
Loading…
Reference in New Issue
Block a user