Update CRI to master

Signed-off-by: Michael Crosby <crosbymichael@gmail.com>

This commit is contained in:
parent 4730088cb5
commit abfc89ce02
vendor.conf · 18 lines changed

@@ -20,7 +20,7 @@ github.com/gogo/protobuf v1.0.0
 github.com/gogo/googleapis 08a7655d27152912db7aaf4f983275eaf8d128ef
 github.com/golang/protobuf v1.1.0
 github.com/opencontainers/runtime-spec 29686dbc5559d93fb1ef402eeda3e35c38d75af4 # v1.0.1-59-g29686db
-github.com/opencontainers/runc 6a3f4749b81768cd01ef3da117ef0a19cd572652
+github.com/opencontainers/runc 029124da7af7360afa781a0234d1b083550f797c
 github.com/konsorten/go-windows-terminal-sequences v1.0.1
 github.com/sirupsen/logrus v1.4.1
 github.com/urfave/cli 7bc6a0acffa589f415f88aca16cc1de5ffd66f9c
@@ -44,9 +44,8 @@ github.com/google/go-cmp v0.1.0
 go.etcd.io/bbolt v1.3.2

 # cri dependencies
-github.com/containerd/cri 4dd6735020f5596dd41738f8c4f5cb07fa804c5e # master
+github.com/containerd/cri 0e2afb63ac3a1c8e59abd6f2e9a6ce075135d5b0 # master
 github.com/containerd/go-cni 40bcf8ec8acd7372be1d77031d585d5d8e561c90
-github.com/blang/semver v3.1.0
 github.com/containernetworking/cni v0.6.0
 github.com/containernetworking/plugins v0.7.0
 github.com/davecgh/go-spew v1.1.0
@@ -60,7 +59,6 @@ github.com/hashicorp/go-multierror ed905158d87462226a13fe39ddf685ea65f1c11f
 github.com/json-iterator/go 1.1.5
 github.com/modern-go/reflect2 1.0.1
 github.com/modern-go/concurrent 1.0.3
-github.com/opencontainers/runtime-tools v0.6.0
 github.com/opencontainers/selinux v1.2.1
 github.com/seccomp/libseccomp-golang 32f571b70023028bd57d9288c20efbcb237f3ce0
 github.com/tchap/go-patricia v2.2.6
@@ -72,13 +70,13 @@ golang.org/x/oauth2 a6bd8cefa1811bd24b86f8902872e4e8225f74c4
 golang.org/x/time f51c12702a4d776e4c1fa9b0fabab841babae631
 gopkg.in/inf.v0 3887ee99ecf07df5b447e9b00d9c0b2adaa9f3e4
 gopkg.in/yaml.v2 v2.2.1
-k8s.io/api kubernetes-1.13.0
-k8s.io/apimachinery kubernetes-1.13.0
-k8s.io/apiserver kubernetes-1.13.0
-k8s.io/client-go kubernetes-1.13.0
+k8s.io/api kubernetes-1.15.0-alpha.0
+k8s.io/apimachinery kubernetes-1.15.0-alpha.0
+k8s.io/apiserver kubernetes-1.15.0-alpha.0
+k8s.io/client-go kubernetes-1.15.0-alpha.0
 k8s.io/klog 8139d8cb77af419532b33dfa7dd09fbc5f1d344f
-k8s.io/kubernetes v1.13.0
-k8s.io/utils 0d26856f57b32ec3398579285e5c8a2bfe8c5243
+k8s.io/kubernetes v1.15.0-alpha.0
+k8s.io/utils c2654d5206da6b7b6ace12841e8f359bb89b443c
 sigs.k8s.io/yaml v1.1.0

 # zfs dependencies
vendor/github.com/blang/semver/LICENSE (generated, vendored) · 22 lines removed

@@ -1,22 +0,0 @@
-The MIT License
-
-Copyright (c) 2014 Benedikt Lang <github at benediktlang.de>
-
-Permission is hereby granted, free of charge, to any person obtaining a copy
-of this software and associated documentation files (the "Software"), to deal
-in the Software without restriction, including without limitation the rights
-to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-copies of the Software, and to permit persons to whom the Software is
-furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in
-all copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
-THE SOFTWARE.
vendor/github.com/blang/semver/README.md (generated, vendored) · 191 lines removed

@@ -1,191 +0,0 @@
-semver for golang [![Build Status](https://drone.io/github.com/blang/semver/status.png)](https://drone.io/github.com/blang/semver/latest) [![GoDoc](https://godoc.org/github.com/blang/semver?status.png)](https://godoc.org/github.com/blang/semver) [![Coverage Status](https://img.shields.io/coveralls/blang/semver.svg)](https://coveralls.io/r/blang/semver?branch=master)
-======
-
-semver is a [Semantic Versioning](http://semver.org/) library written in golang. It fully covers spec version `2.0.0`.
-
-Usage
------
-```bash
-$ go get github.com/blang/semver
-```
-Note: Always vendor your dependencies or fix on a specific version tag.
-
-```go
-import github.com/blang/semver
-v1, err := semver.Make("1.0.0-beta")
-v2, err := semver.Make("2.0.0-beta")
-v1.Compare(v2)
-```
-
-Also check the [GoDocs](http://godoc.org/github.com/blang/semver).
-
-Why should I use this lib?
------
-
-- Fully spec compatible
-- No reflection
-- No regex
-- Fully tested (Coverage >99%)
-- Readable parsing/validation errors
-- Fast (See [Benchmarks](#benchmarks))
-- Only Stdlib
-- Uses values instead of pointers
-- Many features, see below
-
-
-Features
------
-
-- Parsing and validation at all levels
-- Comparator-like comparisons
-- Compare Helper Methods
-- InPlace manipulation
-- Ranges `>=1.0.0 <2.0.0 || >=3.0.0 !3.0.1-beta.1`
-- Sortable (implements sort.Interface)
-- database/sql compatible (sql.Scanner/Valuer)
-- encoding/json compatible (json.Marshaler/Unmarshaler)
-
-Ranges
-------
-
-A `Range` is a set of conditions which specify which versions satisfy the range.
-
-A condition is composed of an operator and a version. The supported operators are:
-
-- `<1.0.0` Less than `1.0.0`
-- `<=1.0.0` Less than or equal to `1.0.0`
-- `>1.0.0` Greater than `1.0.0`
-- `>=1.0.0` Greater than or equal to `1.0.0`
-- `1.0.0`, `=1.0.0`, `==1.0.0` Equal to `1.0.0`
-- `!1.0.0`, `!=1.0.0` Not equal to `1.0.0`. Excludes version `1.0.0`.
-
-A `Range` can link multiple `Ranges` separated by space:
-
-Ranges can be linked by logical AND:
-
-- `>1.0.0 <2.0.0` would match between both ranges, so `1.1.1` and `1.8.7` but not `1.0.0` or `2.0.0`
-- `>1.0.0 <3.0.0 !2.0.3-beta.2` would match every version between `1.0.0` and `3.0.0` except `2.0.3-beta.2`
-
-Ranges can also be linked by logical OR:
-
-- `<2.0.0 || >=3.0.0` would match `1.x.x` and `3.x.x` but not `2.x.x`
-
-AND has a higher precedence than OR. It's not possible to use brackets.
-
-Ranges can be combined by both AND and OR
-
-- `>1.0.0 <2.0.0 || >3.0.0 !4.2.1` would match `1.2.3`, `1.9.9`, `3.1.1`, but not `4.2.1`, `2.1.1`
-
-Range usage:
-
-```
-v, err := semver.Parse("1.2.3")
-range, err := semver.ParseRange(">1.0.0 <2.0.0 || >=3.0.0")
-if range(v) {
-	//valid
-}
-
-```
-
-Example
------
-
-Have a look at full examples in [examples/main.go](examples/main.go)
-
-```go
-import github.com/blang/semver
-
-v, err := semver.Make("0.0.1-alpha.preview+123.github")
-fmt.Printf("Major: %d\n", v.Major)
-fmt.Printf("Minor: %d\n", v.Minor)
-fmt.Printf("Patch: %d\n", v.Patch)
-fmt.Printf("Pre: %s\n", v.Pre)
-fmt.Printf("Build: %s\n", v.Build)
-
-// Prerelease versions array
-if len(v.Pre) > 0 {
-	fmt.Println("Prerelease versions:")
-	for i, pre := range v.Pre {
-		fmt.Printf("%d: %q\n", i, pre)
-	}
-}
-
-// Build meta data array
-if len(v.Build) > 0 {
-	fmt.Println("Build meta data:")
-	for i, build := range v.Build {
-		fmt.Printf("%d: %q\n", i, build)
-	}
-}
-
-v001, err := semver.Make("0.0.1")
-// Compare using helpers: v.GT(v2), v.LT, v.GTE, v.LTE
-v001.GT(v) == true
-v.LT(v001) == true
-v.GTE(v) == true
-v.LTE(v) == true
-
-// Or use v.Compare(v2) for comparisons (-1, 0, 1):
-v001.Compare(v) == 1
-v.Compare(v001) == -1
-v.Compare(v) == 0
-
-// Manipulate Version in place:
-v.Pre[0], err = semver.NewPRVersion("beta")
-if err != nil {
-	fmt.Printf("Error parsing pre release version: %q", err)
-}
-
-fmt.Println("\nValidate versions:")
-v.Build[0] = "?"
-
-err = v.Validate()
-if err != nil {
-	fmt.Printf("Validation failed: %s\n", err)
-}
-```
-
-
-Benchmarks
------
-
-    BenchmarkParseSimple-4         5000000    390 ns/op    48 B/op   1 allocs/op
-    BenchmarkParseComplex-4        1000000   1813 ns/op   256 B/op   7 allocs/op
-    BenchmarkParseAverage-4        1000000   1171 ns/op   163 B/op   4 allocs/op
-    BenchmarkStringSimple-4       20000000    119 ns/op    16 B/op   1 allocs/op
-    BenchmarkStringLarger-4       10000000    206 ns/op    32 B/op   2 allocs/op
-    BenchmarkStringComplex-4       5000000    324 ns/op    80 B/op   3 allocs/op
-    BenchmarkStringAverage-4       5000000    273 ns/op    53 B/op   2 allocs/op
-    BenchmarkValidateSimple-4    200000000   9.33 ns/op     0 B/op   0 allocs/op
-    BenchmarkValidateComplex-4     3000000    469 ns/op     0 B/op   0 allocs/op
-    BenchmarkValidateAverage-4     5000000    256 ns/op     0 B/op   0 allocs/op
-    BenchmarkCompareSimple-4     100000000   11.8 ns/op     0 B/op   0 allocs/op
-    BenchmarkCompareComplex-4     50000000   30.8 ns/op     0 B/op   0 allocs/op
-    BenchmarkCompareAverage-4     30000000   41.5 ns/op     0 B/op   0 allocs/op
-    BenchmarkSort-4                3000000    419 ns/op   256 B/op   2 allocs/op
-    BenchmarkRangeParseSimple-4    2000000    850 ns/op   192 B/op   5 allocs/op
-    BenchmarkRangeParseAverage-4   1000000   1677 ns/op   400 B/op  10 allocs/op
-    BenchmarkRangeParseComplex-4    300000   5214 ns/op  1440 B/op  30 allocs/op
-    BenchmarkRangeMatchSimple-4   50000000   25.6 ns/op     0 B/op   0 allocs/op
-    BenchmarkRangeMatchAverage-4  30000000   56.4 ns/op     0 B/op   0 allocs/op
-    BenchmarkRangeMatchComplex-4  10000000    153 ns/op     0 B/op   0 allocs/op
-
-See benchmark cases at [semver_test.go](semver_test.go)
-
-
-Motivation
------
-
-I simply couldn't find any lib supporting the full spec. Others were just wrong or used reflection and regex which i don't like.
-
-
-Contribution
------
-
-Feel free to make a pull request. For bigger changes create a issue first to discuss about it.
-
-
-License
------
-
-See [LICENSE](LICENSE) file.
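The removed README above documents the library's `Make`/`Compare` calls and its range syntax. As a quick orientation for readers of this vendor change, here is a minimal, self-contained sketch of that documented usage; the version strings are hypothetical, and `rng` is used instead of the README's `range` variable name, which is a Go keyword:

```go
package main

import (
	"fmt"

	"github.com/blang/semver"
)

func main() {
	v1 := semver.MustParse("1.0.0-beta")
	v2 := semver.MustParse("2.0.0-beta")
	fmt.Println(v1.Compare(v2)) // -1: v1 sorts below v2

	// AND within a group, OR between groups, as described in the README.
	rng, err := semver.ParseRange(">1.0.0 <2.0.0 || >=3.0.0")
	if err != nil {
		panic(err)
	}
	fmt.Println(rng(semver.MustParse("1.1.1"))) // true
	fmt.Println(rng(semver.MustParse("2.1.0"))) // false
}
```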
vendor/github.com/blang/semver/json.go (generated, vendored) · 23 lines removed

@@ -1,23 +0,0 @@
-package semver
-
-import (
-	"encoding/json"
-)
-
-// MarshalJSON implements the encoding/json.Marshaler interface.
-func (v Version) MarshalJSON() ([]byte, error) {
-	return json.Marshal(v.String())
-}
-
-// UnmarshalJSON implements the encoding/json.Unmarshaler interface.
-func (v *Version) UnmarshalJSON(data []byte) (err error) {
-	var versionString string
-
-	if err = json.Unmarshal(data, &versionString); err != nil {
-		return
-	}
-
-	*v, err = Parse(versionString)
-
-	return
-}
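The removed json.go wires `Version` into encoding/json by marshalling it as its canonical string and re-parsing it on the way back in. A minimal round-trip sketch of that behavior, with a hypothetical version string:

```go
package main

import (
	"encoding/json"
	"fmt"

	"github.com/blang/semver"
)

func main() {
	v := semver.MustParse("1.2.3-rc.1+build.5") // hypothetical version
	data, _ := json.Marshal(v)
	fmt.Println(string(data)) // "1.2.3-rc.1+build.5" — a quoted JSON string

	var out semver.Version
	_ = json.Unmarshal(data, &out) // UnmarshalJSON re-parses the string
	fmt.Println(out.Equals(v))     // true
}
```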
vendor/github.com/blang/semver/range.go (generated, vendored) · 224 lines removed

@@ -1,224 +0,0 @@
-package semver
-
-import (
-	"fmt"
-	"strings"
-	"unicode"
-)
-
-type comparator func(Version, Version) bool
-
-var (
-	compEQ comparator = func(v1 Version, v2 Version) bool {
-		return v1.Compare(v2) == 0
-	}
-	compNE = func(v1 Version, v2 Version) bool {
-		return v1.Compare(v2) != 0
-	}
-	compGT = func(v1 Version, v2 Version) bool {
-		return v1.Compare(v2) == 1
-	}
-	compGE = func(v1 Version, v2 Version) bool {
-		return v1.Compare(v2) >= 0
-	}
-	compLT = func(v1 Version, v2 Version) bool {
-		return v1.Compare(v2) == -1
-	}
-	compLE = func(v1 Version, v2 Version) bool {
-		return v1.Compare(v2) <= 0
-	}
-)
-
-type versionRange struct {
-	v Version
-	c comparator
-}
-
-// rangeFunc creates a Range from the given versionRange.
-func (vr *versionRange) rangeFunc() Range {
-	return Range(func(v Version) bool {
-		return vr.c(v, vr.v)
-	})
-}
-
-// Range represents a range of versions.
-// A Range can be used to check if a Version satisfies it:
-//
-//     range, err := semver.ParseRange(">1.0.0 <2.0.0")
-//     range(semver.MustParse("1.1.1") // returns true
-type Range func(Version) bool
-
-// OR combines the existing Range with another Range using logical OR.
-func (rf Range) OR(f Range) Range {
-	return Range(func(v Version) bool {
-		return rf(v) || f(v)
-	})
-}
-
-// AND combines the existing Range with another Range using logical AND.
-func (rf Range) AND(f Range) Range {
-	return Range(func(v Version) bool {
-		return rf(v) && f(v)
-	})
-}
-
-// ParseRange parses a range and returns a Range.
-// If the range could not be parsed an error is returned.
-//
-// Valid ranges are:
-//   - "<1.0.0"
-//   - "<=1.0.0"
-//   - ">1.0.0"
-//   - ">=1.0.0"
-//   - "1.0.0", "=1.0.0", "==1.0.0"
-//   - "!1.0.0", "!=1.0.0"
-//
-// A Range can consist of multiple ranges separated by space:
-// Ranges can be linked by logical AND:
-//   - ">1.0.0 <2.0.0" would match between both ranges, so "1.1.1" and "1.8.7" but not "1.0.0" or "2.0.0"
-//   - ">1.0.0 <3.0.0 !2.0.3-beta.2" would match every version between 1.0.0 and 3.0.0 except 2.0.3-beta.2
-//
-// Ranges can also be linked by logical OR:
-//   - "<2.0.0 || >=3.0.0" would match "1.x.x" and "3.x.x" but not "2.x.x"
-//
-// AND has a higher precedence than OR. It's not possible to use brackets.
-//
-// Ranges can be combined by both AND and OR
-//
-//  - `>1.0.0 <2.0.0 || >3.0.0 !4.2.1` would match `1.2.3`, `1.9.9`, `3.1.1`, but not `4.2.1`, `2.1.1`
-func ParseRange(s string) (Range, error) {
-	parts := splitAndTrim(s)
-	orParts, err := splitORParts(parts)
-	if err != nil {
-		return nil, err
-	}
-	var orFn Range
-	for _, p := range orParts {
-		var andFn Range
-		for _, ap := range p {
-			opStr, vStr, err := splitComparatorVersion(ap)
-			if err != nil {
-				return nil, err
-			}
-			vr, err := buildVersionRange(opStr, vStr)
-			if err != nil {
-				return nil, fmt.Errorf("Could not parse Range %q: %s", ap, err)
-			}
-			rf := vr.rangeFunc()
-
-			// Set function
-			if andFn == nil {
-				andFn = rf
-			} else { // Combine with existing function
-				andFn = andFn.AND(rf)
-			}
-		}
-		if orFn == nil {
-			orFn = andFn
-		} else {
-			orFn = orFn.OR(andFn)
-		}
-
-	}
-	return orFn, nil
-}
-
-// splitORParts splits the already cleaned parts by '||'.
-// Checks for invalid positions of the operator and returns an
-// error if found.
-func splitORParts(parts []string) ([][]string, error) {
-	var ORparts [][]string
-	last := 0
-	for i, p := range parts {
-		if p == "||" {
-			if i == 0 {
-				return nil, fmt.Errorf("First element in range is '||'")
-			}
-			ORparts = append(ORparts, parts[last:i])
-			last = i + 1
-		}
-	}
-	if last == len(parts) {
-		return nil, fmt.Errorf("Last element in range is '||'")
-	}
-	ORparts = append(ORparts, parts[last:])
-	return ORparts, nil
-}
-
-// buildVersionRange takes a slice of 2: operator and version
-// and builds a versionRange, otherwise an error.
-func buildVersionRange(opStr, vStr string) (*versionRange, error) {
-	c := parseComparator(opStr)
-	if c == nil {
-		return nil, fmt.Errorf("Could not parse comparator %q in %q", opStr, strings.Join([]string{opStr, vStr}, ""))
-	}
-	v, err := Parse(vStr)
-	if err != nil {
-		return nil, fmt.Errorf("Could not parse version %q in %q: %s", vStr, strings.Join([]string{opStr, vStr}, ""), err)
-	}
-
-	return &versionRange{
-		v: v,
-		c: c,
-	}, nil
-
-}
-
-// splitAndTrim splits a range string by spaces and cleans leading and trailing spaces
-func splitAndTrim(s string) (result []string) {
-	last := 0
-	for i := 0; i < len(s); i++ {
-		if s[i] == ' ' {
-			if last < i-1 {
-				result = append(result, s[last:i])
-			}
-			last = i + 1
-		}
-	}
-	if last < len(s)-1 {
-		result = append(result, s[last:])
-	}
-	// parts := strings.Split(s, " ")
-	// for _, x := range parts {
-	//	if s := strings.TrimSpace(x); len(s) != 0 {
-	//		result = append(result, s)
-	//	}
-	// }
-	return
-}
-
-// splitComparatorVersion splits the comparator from the version.
-// Spaces between the comparator and the version are not allowed.
-// Input must be free of leading or trailing spaces.
-func splitComparatorVersion(s string) (string, string, error) {
-	i := strings.IndexFunc(s, unicode.IsDigit)
-	if i == -1 {
-		return "", "", fmt.Errorf("Could not get version from string: %q", s)
-	}
-	return strings.TrimSpace(s[0:i]), s[i:], nil
-}
-
-func parseComparator(s string) comparator {
-	switch s {
-	case "==":
-		fallthrough
-	case "":
-		fallthrough
-	case "=":
-		return compEQ
-	case ">":
-		return compGT
-	case ">=":
-		return compGE
-	case "<":
-		return compLT
-	case "<=":
-		return compLE
-	case "!":
-		fallthrough
-	case "!=":
-		return compNE
-	}
-
-	return nil
-}
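range.go, shown above, models a `Range` as a predicate over `Version` plus `AND`/`OR` combinators, so ranges can also be composed programmatically rather than only via the string syntax. A small sketch with hypothetical bounds:

```go
package main

import (
	"fmt"

	"github.com/blang/semver"
)

func main() {
	lower, _ := semver.ParseRange(">=1.0.0")
	upper, _ := semver.ParseRange("<2.0.0")
	v3plus, _ := semver.ParseRange(">=3.0.0")

	accepted := lower.AND(upper).OR(v3plus) // >=1.0.0 <2.0.0 || >=3.0.0

	fmt.Println(accepted(semver.MustParse("1.5.0"))) // true
	fmt.Println(accepted(semver.MustParse("2.5.0"))) // false
	fmt.Println(accepted(semver.MustParse("3.1.0"))) // true
}
```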
vendor/github.com/blang/semver/semver.go (generated, vendored) · 395 lines removed

@@ -1,395 +0,0 @@
-package semver
-
-import (
-	"errors"
-	"fmt"
-	"strconv"
-	"strings"
-)
-
-const (
-	numbers  string = "0123456789"
-	alphas          = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ-"
-	alphanum        = alphas + numbers
-)
-
-// SpecVersion is the latest fully supported spec version of semver
-var SpecVersion = Version{
-	Major: 2,
-	Minor: 0,
-	Patch: 0,
-}
-
-// Version represents a semver compatible version
-type Version struct {
-	Major uint64
-	Minor uint64
-	Patch uint64
-	Pre   []PRVersion
-	Build []string //No Precendence
-}
-
-// Version to string
-func (v Version) String() string {
-	b := make([]byte, 0, 5)
-	b = strconv.AppendUint(b, v.Major, 10)
-	b = append(b, '.')
-	b = strconv.AppendUint(b, v.Minor, 10)
-	b = append(b, '.')
-	b = strconv.AppendUint(b, v.Patch, 10)
-
-	if len(v.Pre) > 0 {
-		b = append(b, '-')
-		b = append(b, v.Pre[0].String()...)
-
-		for _, pre := range v.Pre[1:] {
-			b = append(b, '.')
-			b = append(b, pre.String()...)
-		}
-	}
-
-	if len(v.Build) > 0 {
-		b = append(b, '+')
-		b = append(b, v.Build[0]...)
-
-		for _, build := range v.Build[1:] {
-			b = append(b, '.')
-			b = append(b, build...)
-		}
-	}
-
-	return string(b)
-}
-
-// Equals checks if v is equal to o.
-func (v Version) Equals(o Version) bool {
-	return (v.Compare(o) == 0)
-}
-
-// EQ checks if v is equal to o.
-func (v Version) EQ(o Version) bool {
-	return (v.Compare(o) == 0)
-}
-
-// NE checks if v is not equal to o.
-func (v Version) NE(o Version) bool {
-	return (v.Compare(o) != 0)
-}
-
-// GT checks if v is greater than o.
-func (v Version) GT(o Version) bool {
-	return (v.Compare(o) == 1)
-}
-
-// GTE checks if v is greater than or equal to o.
-func (v Version) GTE(o Version) bool {
-	return (v.Compare(o) >= 0)
-}
-
-// GE checks if v is greater than or equal to o.
-func (v Version) GE(o Version) bool {
-	return (v.Compare(o) >= 0)
-}
-
-// LT checks if v is less than o.
-func (v Version) LT(o Version) bool {
-	return (v.Compare(o) == -1)
-}
-
-// LTE checks if v is less than or equal to o.
-func (v Version) LTE(o Version) bool {
-	return (v.Compare(o) <= 0)
-}
-
-// LE checks if v is less than or equal to o.
-func (v Version) LE(o Version) bool {
-	return (v.Compare(o) <= 0)
-}
-
-// Compare compares Versions v to o:
-// -1 == v is less than o
-// 0 == v is equal to o
-// 1 == v is greater than o
-func (v Version) Compare(o Version) int {
-	if v.Major != o.Major {
-		if v.Major > o.Major {
-			return 1
-		}
-		return -1
-	}
-	if v.Minor != o.Minor {
-		if v.Minor > o.Minor {
-			return 1
-		}
-		return -1
-	}
-	if v.Patch != o.Patch {
-		if v.Patch > o.Patch {
-			return 1
-		}
-		return -1
-	}
-
-	// Quick comparison if a version has no prerelease versions
-	if len(v.Pre) == 0 && len(o.Pre) == 0 {
-		return 0
-	} else if len(v.Pre) == 0 && len(o.Pre) > 0 {
-		return 1
-	} else if len(v.Pre) > 0 && len(o.Pre) == 0 {
-		return -1
-	}
-
-	i := 0
-	for ; i < len(v.Pre) && i < len(o.Pre); i++ {
-		if comp := v.Pre[i].Compare(o.Pre[i]); comp == 0 {
-			continue
-		} else if comp == 1 {
-			return 1
-		} else {
-			return -1
-		}
-	}
-
-	// If all pr versions are the equal but one has further prversion, this one greater
-	if i == len(v.Pre) && i == len(o.Pre) {
-		return 0
-	} else if i == len(v.Pre) && i < len(o.Pre) {
-		return -1
-	} else {
-		return 1
-	}
-
-}
-
-// Validate validates v and returns error in case
-func (v Version) Validate() error {
-	// Major, Minor, Patch already validated using uint64
-
-	for _, pre := range v.Pre {
-		if !pre.IsNum { //Numeric prerelease versions already uint64
-			if len(pre.VersionStr) == 0 {
-				return fmt.Errorf("Prerelease can not be empty %q", pre.VersionStr)
-			}
-			if !containsOnly(pre.VersionStr, alphanum) {
-				return fmt.Errorf("Invalid character(s) found in prerelease %q", pre.VersionStr)
-			}
-		}
-	}
-
-	for _, build := range v.Build {
-		if len(build) == 0 {
-			return fmt.Errorf("Build meta data can not be empty %q", build)
-		}
-		if !containsOnly(build, alphanum) {
-			return fmt.Errorf("Invalid character(s) found in build meta data %q", build)
-		}
-	}
-
-	return nil
-}
-
-// New is an alias for Parse and returns a pointer, parses version string and returns a validated Version or error
-func New(s string) (vp *Version, err error) {
-	v, err := Parse(s)
-	vp = &v
-	return
-}
-
-// Make is an alias for Parse, parses version string and returns a validated Version or error
-func Make(s string) (Version, error) {
-	return Parse(s)
-}
-
-// Parse parses version string and returns a validated Version or error
-func Parse(s string) (Version, error) {
-	if len(s) == 0 {
-		return Version{}, errors.New("Version string empty")
-	}
-
-	// Split into major.minor.(patch+pr+meta)
-	parts := strings.SplitN(s, ".", 3)
-	if len(parts) != 3 {
-		return Version{}, errors.New("No Major.Minor.Patch elements found")
-	}
-
-	// Major
-	if !containsOnly(parts[0], numbers) {
-		return Version{}, fmt.Errorf("Invalid character(s) found in major number %q", parts[0])
-	}
-	if hasLeadingZeroes(parts[0]) {
-		return Version{}, fmt.Errorf("Major number must not contain leading zeroes %q", parts[0])
-	}
-	major, err := strconv.ParseUint(parts[0], 10, 64)
-	if err != nil {
-		return Version{}, err
-	}
-
-	// Minor
-	if !containsOnly(parts[1], numbers) {
-		return Version{}, fmt.Errorf("Invalid character(s) found in minor number %q", parts[1])
-	}
-	if hasLeadingZeroes(parts[1]) {
-		return Version{}, fmt.Errorf("Minor number must not contain leading zeroes %q", parts[1])
-	}
-	minor, err := strconv.ParseUint(parts[1], 10, 64)
-	if err != nil {
-		return Version{}, err
-	}
-
-	v := Version{}
-	v.Major = major
-	v.Minor = minor
-
-	var build, prerelease []string
-	patchStr := parts[2]
-
-	if buildIndex := strings.IndexRune(patchStr, '+'); buildIndex != -1 {
-		build = strings.Split(patchStr[buildIndex+1:], ".")
-		patchStr = patchStr[:buildIndex]
-	}
-
-	if preIndex := strings.IndexRune(patchStr, '-'); preIndex != -1 {
-		prerelease = strings.Split(patchStr[preIndex+1:], ".")
-		patchStr = patchStr[:preIndex]
-	}
-
-	if !containsOnly(patchStr, numbers) {
-		return Version{}, fmt.Errorf("Invalid character(s) found in patch number %q", patchStr)
-	}
-	if hasLeadingZeroes(patchStr) {
-		return Version{}, fmt.Errorf("Patch number must not contain leading zeroes %q", patchStr)
-	}
-	patch, err := strconv.ParseUint(patchStr, 10, 64)
-	if err != nil {
-		return Version{}, err
-	}
-
-	v.Patch = patch
-
-	// Prerelease
-	for _, prstr := range prerelease {
-		parsedPR, err := NewPRVersion(prstr)
-		if err != nil {
-			return Version{}, err
-		}
-		v.Pre = append(v.Pre, parsedPR)
-	}
-
-	// Build meta data
-	for _, str := range build {
-		if len(str) == 0 {
-			return Version{}, errors.New("Build meta data is empty")
-		}
-		if !containsOnly(str, alphanum) {
-			return Version{}, fmt.Errorf("Invalid character(s) found in build meta data %q", str)
-		}
-		v.Build = append(v.Build, str)
-	}
-
-	return v, nil
-}
-
-// MustParse is like Parse but panics if the version cannot be parsed.
-func MustParse(s string) Version {
-	v, err := Parse(s)
-	if err != nil {
-		panic(`semver: Parse(` + s + `): ` + err.Error())
-	}
-	return v
-}
-
-// PRVersion represents a PreRelease Version
-type PRVersion struct {
-	VersionStr string
-	VersionNum uint64
-	IsNum      bool
-}
-
-// NewPRVersion creates a new valid prerelease version
-func NewPRVersion(s string) (PRVersion, error) {
-	if len(s) == 0 {
-		return PRVersion{}, errors.New("Prerelease is empty")
-	}
-	v := PRVersion{}
-	if containsOnly(s, numbers) {
-		if hasLeadingZeroes(s) {
-			return PRVersion{}, fmt.Errorf("Numeric PreRelease version must not contain leading zeroes %q", s)
-		}
-		num, err := strconv.ParseUint(s, 10, 64)
-
-		// Might never be hit, but just in case
-		if err != nil {
-			return PRVersion{}, err
-		}
-		v.VersionNum = num
-		v.IsNum = true
-	} else if containsOnly(s, alphanum) {
-		v.VersionStr = s
-		v.IsNum = false
-	} else {
-		return PRVersion{}, fmt.Errorf("Invalid character(s) found in prerelease %q", s)
-	}
-	return v, nil
-}
-
-// IsNumeric checks if prerelease-version is numeric
-func (v PRVersion) IsNumeric() bool {
-	return v.IsNum
-}
-
-// Compare compares two PreRelease Versions v and o:
-// -1 == v is less than o
-// 0 == v is equal to o
-// 1 == v is greater than o
-func (v PRVersion) Compare(o PRVersion) int {
-	if v.IsNum && !o.IsNum {
-		return -1
-	} else if !v.IsNum && o.IsNum {
-		return 1
-	} else if v.IsNum && o.IsNum {
-		if v.VersionNum == o.VersionNum {
-			return 0
-		} else if v.VersionNum > o.VersionNum {
-			return 1
-		} else {
-			return -1
-		}
-	} else { // both are Alphas
-		if v.VersionStr == o.VersionStr {
-			return 0
-		} else if v.VersionStr > o.VersionStr {
-			return 1
-		} else {
-			return -1
-		}
-	}
-}
-
-// PreRelease version to string
-func (v PRVersion) String() string {
-	if v.IsNum {
-		return strconv.FormatUint(v.VersionNum, 10)
-	}
-	return v.VersionStr
-}
-
-func containsOnly(s string, set string) bool {
-	return strings.IndexFunc(s, func(r rune) bool {
-		return !strings.ContainsRune(set, r)
-	}) == -1
-}
-
-func hasLeadingZeroes(s string) bool {
-	return len(s) > 1 && s[0] == '0'
-}
-
-// NewBuildVersion creates a new valid build version
-func NewBuildVersion(s string) (string, error) {
-	if len(s) == 0 {
-		return "", errors.New("Buildversion is empty")
-	}
-	if !containsOnly(s, alphanum) {
-		return "", fmt.Errorf("Invalid character(s) found in build meta data %q", s)
-	}
-	return s, nil
-}
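The `Compare` logic in the removed semver.go gives prerelease versions lower precedence than the corresponding release, treats numeric prerelease identifiers as lower than alphanumeric ones, and lets a longer prerelease win when the shared identifiers are equal. A short sketch illustrating those rules with hypothetical versions:

```go
package main

import (
	"fmt"

	"github.com/blang/semver"
)

func main() {
	cmp := func(a, b string) int {
		return semver.MustParse(a).Compare(semver.MustParse(b))
	}
	fmt.Println(cmp("1.0.0-alpha", "1.0.0"))         // -1: prerelease sorts below the release
	fmt.Println(cmp("1.0.0-alpha", "1.0.0-alpha.1")) // -1: otherwise-equal but shorter prerelease is lower
	fmt.Println(cmp("1.0.0-11", "1.0.0-alpha"))      // -1: numeric identifiers sort below alphanumeric ones
}
```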
vendor/github.com/blang/semver/sort.go (generated, vendored) · 28 lines removed

@@ -1,28 +0,0 @@
-package semver
-
-import (
-	"sort"
-)
-
-// Versions represents multiple versions.
-type Versions []Version
-
-// Len returns length of version collection
-func (s Versions) Len() int {
-	return len(s)
-}
-
-// Swap swaps two versions inside the collection by its indices
-func (s Versions) Swap(i, j int) {
-	s[i], s[j] = s[j], s[i]
-}
-
-// Less checks if version at index i is less than version at index j
-func (s Versions) Less(i, j int) bool {
-	return s[i].LT(s[j])
-}
-
-// Sort sorts a slice of versions
-func Sort(versions []Version) {
-	sort.Sort(Versions(versions))
-}
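sort.go adapts `Versions` to `sort.Interface`, so slices sort by semantic precedence rather than lexically. A one-function sketch with hypothetical values:

```go
package main

import (
	"fmt"

	"github.com/blang/semver"
)

func main() {
	vs := []semver.Version{
		semver.MustParse("1.10.0"),
		semver.MustParse("1.2.0"),
		semver.MustParse("1.2.0-beta"),
	}
	semver.Sort(vs)
	fmt.Println(vs) // [1.2.0-beta 1.2.0 1.10.0] — numeric, not lexical, ordering
}
```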
vendor/github.com/blang/semver/sql.go (generated, vendored) · 30 lines removed

@@ -1,30 +0,0 @@
-package semver
-
-import (
-	"database/sql/driver"
-	"fmt"
-)
-
-// Scan implements the database/sql.Scanner interface.
-func (v *Version) Scan(src interface{}) (err error) {
-	var str string
-	switch src := src.(type) {
-	case string:
-		str = src
-	case []byte:
-		str = string(src)
-	default:
-		return fmt.Errorf("Version.Scan: cannot convert %T to string.", src)
-	}
-
-	if t, err := Parse(str); err == nil {
-		*v = t
-	}
-
-	return
-}
-
-// Value implements the database/sql/driver.Valuer interface.
-func (v Version) Value() (driver.Value, error) {
-	return v.String(), nil
-}
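sql.go lets a `Version` be stored and read back through database/sql: `Value` emits the canonical string and `Scan` accepts a string or byte slice. A minimal sketch of the two calls outside a real database, with a hypothetical input:

```go
package main

import (
	"fmt"

	"github.com/blang/semver"
)

func main() {
	var v semver.Version
	_ = v.Scan([]byte("2.3.4")) // as a driver would hand back a column value
	val, _ := v.Value()
	fmt.Println(val) // 2.3.4
}
```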
vendor/github.com/containerd/cri/README.md (generated, vendored) · 11 lines changed

@@ -39,6 +39,17 @@ See [test dashboard](https://k8s-testgrid.appspot.com/sig-node-containerd)
 | v1.2 | 1.10+ | v1alpha2 |
 | HEAD | 1.10+ | v1alpha2 |

+**Note:** The support table above specifies the Kubernetes Version that was supported at time of release of the containerd - cri integration.
+
+The following is the current support table for containerd CRI integration taking into account that Kubernetes only supports n-3 minor release versions and 1.10 is now end-of-life.
+
+| Containerd Version | Kubernetes Version | CRI Version |
+|:------------------:|:------------------:|:-----------:|
+| v1.1 | 1.11+ | v1alpha2 |
+| v1.2 | 1.11+ | v1alpha2 |
+| HEAD | 1.11+ | v1alpha2 |
+
 ## Production Quality Cluster on GCE
 For a production quality cluster on GCE brought up with `kube-up.sh` refer [here](docs/kube-up.md).
 ## Installing with Ansible and Kubeadm
vendor/github.com/containerd/cri/cri.go (generated, vendored) · 6 lines changed

@@ -19,6 +19,7 @@ package cri
 import (
 	"flag"
 	"path/filepath"
+	"time"

 	"github.com/containerd/containerd"
 	"github.com/containerd/containerd/api/services/containers/v1"
@@ -118,6 +119,11 @@ func validateConfig(c *criconfig.Config) error {
 		}
 	}
+
+	if c.StreamIdleTimeout != "" {
+		if _, err := time.ParseDuration(c.StreamIdleTimeout); err != nil {
+			return errors.Wrap(err, "invalid stream idle timeout")
+		}
+	}
 	return nil
 }
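The new branch in `validateConfig` simply checks that a non-empty timeout parses as a Go duration. A minimal standalone sketch of the same check; the helper name is illustrative and `fmt.Errorf` stands in for the `errors.Wrap` call used above:

```go
package main

import (
	"fmt"
	"time"
)

// validateStreamIdleTimeout mirrors the check added in the hunk above.
func validateStreamIdleTimeout(timeout string) error {
	if timeout == "" {
		return nil // an empty value falls back to the built-in default
	}
	if _, err := time.ParseDuration(timeout); err != nil {
		return fmt.Errorf("invalid stream idle timeout: %v", err)
	}
	return nil
}

func main() {
	fmt.Println(validateStreamIdleTimeout("4h"))    // <nil>
	fmt.Println(validateStreamIdleTimeout("4 hrs")) // error: not a Go duration string
}
```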
vendor/github.com/containerd/cri/pkg/annotations/annotations.go (generated, vendored) · 9 lines changed

@@ -32,6 +32,15 @@ const (
 	// SandboxID is the sandbox ID annotation
 	SandboxID = "io.kubernetes.cri.sandbox-id"
+
+	// SandboxLogDir is the pod log directory annotation.
+	// If the sandbox needs to generate any log, it will put it into this directory.
+	// Kubelet will be responsible for:
+	// 1) Monitoring the disk usage of the log, and including it as part of the pod
+	// ephemeral storage usage.
+	// 2) Cleaning up the logs when the pod is deleted.
+	// NOTE: Kubelet is not responsible for rotating the logs.
+	SandboxLogDir = "io.kubernetes.cri.sandbox-log-directory"
+
 	// UntrustedWorkload is the sandbox annotation for untrusted workload. Untrusted
 	// workload can only run on dedicated runtime for untrusted workload.
 	UntrustedWorkload = "io.kubernetes.cri.untrusted-workload"
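The new `SandboxLogDir` key joins the existing CRI sandbox annotation keys. A small sketch of what an annotation map carrying these keys could look like; the keys come from the constants above, while all values are hypothetical:

```go
package main

import "fmt"

func main() {
	// Hypothetical annotations attached to a pod sandbox.
	sandboxAnnotations := map[string]string{
		"io.kubernetes.cri.sandbox-id":            "b1f2c3d4e5f6",
		"io.kubernetes.cri.sandbox-log-directory": "/var/log/pods/default_mypod_0001",
		"io.kubernetes.cri.untrusted-workload":    "true",
	}
	for k, v := range sandboxAnnotations {
		fmt.Println(k, "=", v)
	}
}
```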
vendor/github.com/containerd/cri/pkg/config/config.go (generated, vendored) · 10 lines changed

@@ -19,6 +19,7 @@ package config
 import (
 	"github.com/BurntSushi/toml"
 	"github.com/containerd/containerd"
+	"k8s.io/kubernetes/pkg/kubelet/server/streaming"
 )

 // Runtime struct to contain the type(ID), engine, and root variables for a default runtime
@@ -30,6 +31,9 @@ type Runtime struct {
 	// This only works for runtime type "io.containerd.runtime.v1.linux".
 	// DEPRECATED: use Options instead. Remove when shim v1 is deprecated.
 	Engine string `toml:"runtime_engine" json:"runtimeEngine"`
+	// PodAnnotations is a list of pod annotations passed to both pod sandbox as well as
+	// container OCI annotations.
+	PodAnnotations []string `toml:"pod_annotations" json:"PodAnnotations"`
 	// Root is the directory used by containerd for runtime state.
 	// DEPRECATED: use Options instead. Remove when shim v1 is deprecated.
 	// This only works for runtime type "io.containerd.runtime.v1.linux".
@@ -124,6 +128,11 @@ type PluginConfig struct {
 	StreamServerAddress string `toml:"stream_server_address" json:"streamServerAddress"`
 	// StreamServerPort is the port streaming server is listening on.
 	StreamServerPort string `toml:"stream_server_port" json:"streamServerPort"`
+	// StreamIdleTimeout is the maximum time a streaming connection
+	// can be idle before the connection is automatically closed.
+	// The string is in the golang duration format, see:
+	// https://golang.org/pkg/time/#ParseDuration
+	StreamIdleTimeout string `toml:"stream_idle_timeout" json:"streamIdleTimeout"`
 	// EnableSelinux indicates to enable the selinux support.
 	EnableSelinux bool `toml:"enable_selinux" json:"enableSelinux"`
 	// SandboxImage is the image used by sandbox container.
@@ -196,6 +205,7 @@ func DefaultConfig() PluginConfig {
 		},
 		StreamServerAddress: "127.0.0.1",
 		StreamServerPort:    "0",
+		StreamIdleTimeout:   streaming.DefaultConfig.StreamIdleTimeout.String(), // 4 hour
 		EnableSelinux:       false,
 		EnableTLSStreaming:  false,
 		X509KeyPairStreaming: X509KeyPairStreaming{
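The new `StreamIdleTimeout` field is populated from the TOML config through its `stream_idle_timeout` tag, using the BurntSushi/toml package this file already imports. A minimal decoding sketch; the cut-down struct and the sample value are illustrative, not the full `PluginConfig`:

```go
package main

import (
	"fmt"

	"github.com/BurntSushi/toml"
)

// pluginConfig is an illustrative stand-in showing only the new field.
type pluginConfig struct {
	StreamIdleTimeout string `toml:"stream_idle_timeout"`
}

func main() {
	const sample = `stream_idle_timeout = "4h0m0s"` // hypothetical config fragment
	var cfg pluginConfig
	if _, err := toml.Decode(sample, &cfg); err != nil {
		panic(err)
	}
	fmt.Println(cfg.StreamIdleTimeout) // 4h0m0s
}
```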
vendor/github.com/containerd/cri/pkg/containerd/importer/importer.go (generated, vendored) · 4 lines changed

@@ -32,13 +32,13 @@ import (
 	"github.com/containerd/containerd/images"
 	"github.com/containerd/containerd/leases"
 	"github.com/containerd/containerd/log"
+	"github.com/docker/distribution/reference"
 	"github.com/opencontainers/go-digest"
 	"github.com/opencontainers/image-spec/specs-go"
 	ocispec "github.com/opencontainers/image-spec/specs-go/v1"
 	"github.com/pkg/errors"

 	ctrdutil "github.com/containerd/cri/pkg/containerd/util"
-	"github.com/containerd/cri/pkg/util"
 )

 // This code reuses the docker import code from containerd/containerd#1602.
@@ -220,7 +220,7 @@ func Import(ctx context.Context, client *containerd.Client, reader io.Reader, op
 	}

 	for _, ref := range mfst.RepoTags {
-		normalized, err := util.NormalizeImageRef(ref)
+		normalized, err := reference.ParseDockerRef(ref)
 		if err != nil {
 			return refs, errors.Wrapf(err, "normalize image ref %q", ref)
 		}
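This hunk swaps the local `util.NormalizeImageRef` helper for `reference.ParseDockerRef` from docker/distribution, which normalizes a short image reference into a fully qualified, tagged one. A minimal sketch of that call; the input and printed output are illustrative:

```go
package main

import (
	"fmt"

	"github.com/docker/distribution/reference"
)

func main() {
	// A short RepoTag as it might appear in a "docker save" tarball (hypothetical).
	named, err := reference.ParseDockerRef("redis:5")
	if err != nil {
		panic(err)
	}
	fmt.Println(named.String()) // expected to print the normalized form, e.g. docker.io/library/redis:5
}
```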
vendor/github.com/containerd/cri/pkg/containerd/opts/spec.go (generated, vendored) · 700 lines changed

@@ -18,16 +18,41 @@ package opts

 import (
 	"context"
+	"fmt"
+	"io/ioutil"
+	"os"
+	"path/filepath"
+	"sort"
+	"strconv"
+	"strings"

 	"github.com/containerd/containerd/containers"
+	"github.com/containerd/containerd/mount"
 	"github.com/containerd/containerd/oci"
+	osinterface "github.com/containerd/cri/pkg/os"
+	"github.com/containerd/cri/pkg/util"
+	imagespec "github.com/opencontainers/image-spec/specs-go/v1"
+	"github.com/opencontainers/runc/libcontainer/devices"
 	runtimespec "github.com/opencontainers/runtime-spec/specs-go"
+	"github.com/opencontainers/selinux/go-selinux/label"
+	"github.com/pkg/errors"
+	"github.com/sirupsen/logrus"
+	"golang.org/x/sys/unix"
+	runtime "k8s.io/kubernetes/pkg/kubelet/apis/cri/runtime/v1alpha2"
+)
+
+const (
+	// DefaultSandboxCPUshares is default cpu shares for sandbox container.
+	DefaultSandboxCPUshares = 2
 )

 // WithAdditionalGIDs adds any additional groups listed for a particular user in the
 // /etc/groups file of the image's root filesystem to the OCI spec's additionalGids array.
 func WithAdditionalGIDs(userstr string) oci.SpecOpts {
 	return func(ctx context.Context, client oci.Client, c *containers.Container, s *runtimespec.Spec) (err error) {
+		if s.Process == nil {
+			s.Process = &runtimespec.Process{}
+		}
 		gids := s.Process.User.AdditionalGids
 		if err := oci.WithAdditionalGIDs(userstr)(ctx, client, c, s); err != nil {
 			return err
@@ -39,13 +64,680 @@ func WithAdditionalGIDs(userstr string) oci.SpecOpts {
 	}
 }

 func mergeGids(gids1, gids2 []uint32) []uint32 {
+	gidsMap := make(map[uint32]struct{})
 	for _, gid1 := range gids1 {
-		for i, gid2 := range gids2 {
-			if gid1 == gid2 {
-				gids2 = append(gids2[:i], gids2[i+1:]...)
+		gidsMap[gid1] = struct{}{}
+	}
+	for _, gid2 := range gids2 {
+		gidsMap[gid2] = struct{}{}
+	}
+	var gids []uint32
+	for gid := range gidsMap {
+		gids = append(gids, gid)
+	}
+	sort.Slice(gids, func(i, j int) bool { return gids[i] < gids[j] })
+	return gids
+}
+
+// WithoutRunMount removes the `/run` inside the spec
+func WithoutRunMount(_ context.Context, _ oci.Client, c *containers.Container, s *runtimespec.Spec) error {
+	var (
+		mounts  []runtimespec.Mount
+		currnet = s.Mounts
+	)
+	for _, m := range currnet {
+		if filepath.Clean(m.Destination) == "/run" {
+			continue
+		}
+		mounts = append(mounts, m)
+	}
+	s.Mounts = mounts
+	return nil
+}
+
+// WithoutDefaultSecuritySettings removes the default security settings generated on a spec
+func WithoutDefaultSecuritySettings(_ context.Context, _ oci.Client, c *containers.Container, s *runtimespec.Spec) error {
+	if s.Process == nil {
+		s.Process = &runtimespec.Process{}
+	}
+	// Make sure no default seccomp/apparmor is specified
+	s.Process.ApparmorProfile = ""
+	if s.Linux != nil {
+		s.Linux.Seccomp = nil
+	}
+	// Remove default rlimits (See issue #515)
+	s.Process.Rlimits = nil
+	return nil
+}
+
+// WithRelativeRoot sets the root for the container
+func WithRelativeRoot(root string) oci.SpecOpts {
+	return func(ctx context.Context, client oci.Client, c *containers.Container, s *runtimespec.Spec) (err error) {
+		if s.Root == nil {
+			s.Root = &runtimespec.Root{}
+		}
+		s.Root.Path = root
+		return nil
+	}
+}
+
+// WithProcessArgs sets the process args on the spec based on the image and runtime config
+func WithProcessArgs(config *runtime.ContainerConfig, image *imagespec.ImageConfig) oci.SpecOpts {
+	return func(ctx context.Context, client oci.Client, c *containers.Container, s *runtimespec.Spec) (err error) {
+		command, args := config.GetCommand(), config.GetArgs()
+		// The following logic is migrated from https://github.com/moby/moby/blob/master/daemon/commit.go
+		// TODO(random-liu): Clearly define the commands overwrite behavior.
+		if len(command) == 0 {
+			// Copy array to avoid data race.
+			if len(args) == 0 {
+				args = append([]string{}, image.Cmd...)
+			}
+			if command == nil {
+				command = append([]string{}, image.Entrypoint...)
+			}
+		}
+		if len(command) == 0 && len(args) == 0 {
+			return errors.New("no command specified")
+		}
+		return oci.WithProcessArgs(append(command, args...)...)(ctx, client, c, s)
+	}
+}
+
+// WithMounts sorts and adds runtime and CRI mounts to the spec
+func WithMounts(osi osinterface.OS, config *runtime.ContainerConfig, extra []*runtime.Mount, mountLabel string) oci.SpecOpts {
+	return func(ctx context.Context, client oci.Client, _ *containers.Container, s *runtimespec.Spec) (err error) {
+		// mergeMounts merge CRI mounts with extra mounts. If a mount destination
+		// is mounted by both a CRI mount and an extra mount, the CRI mount will
+		// be kept.
+		var (
+			criMounts = config.GetMounts()
+			mounts    = append([]*runtime.Mount{}, criMounts...)
+		)
+		// Copy all mounts from extra mounts, except for mounts overriden by CRI.
+		for _, e := range extra {
+			found := false
+			for _, c := range criMounts {
+				if filepath.Clean(e.ContainerPath) == filepath.Clean(c.ContainerPath) {
+					found = true
 					break
 				}
 			}
+			if !found {
+				mounts = append(mounts, e)
+			}
+		}
+
+		// Sort mounts in number of parts. This ensures that high level mounts don't
+		// shadow other mounts.
+		sort.Sort(orderedMounts(mounts))
+
+		// Mount cgroup into the container as readonly, which inherits docker's behavior.
+		s.Mounts = append(s.Mounts, runtimespec.Mount{
+			Source:      "cgroup",
+			Destination: "/sys/fs/cgroup",
+			Type:        "cgroup",
+			Options:     []string{"nosuid", "noexec", "nodev", "relatime", "ro"},
+		})
+
+		// Copy all mounts from default mounts, except for
+		// - mounts overriden by supplied mount;
+		// - all mounts under /dev if a supplied /dev is present.
+		mountSet := make(map[string]struct{})
+		for _, m := range mounts {
+			mountSet[filepath.Clean(m.ContainerPath)] = struct{}{}
+		}
+
+		defaultMounts := s.Mounts
+		s.Mounts = nil
+
+		for _, m := range defaultMounts {
+			dst := filepath.Clean(m.Destination)
+			if _, ok := mountSet[dst]; ok {
+				// filter out mount overridden by a supplied mount
+				continue
+			}
+			if _, mountDev := mountSet["/dev"]; mountDev && strings.HasPrefix(dst, "/dev/") {
+				// filter out everything under /dev if /dev is a supplied mount
+				continue
+			}
+			s.Mounts = append(s.Mounts, m)
+		}
+
+		for _, mount := range mounts {
+			var (
+				dst = mount.GetContainerPath()
+				src = mount.GetHostPath()
+			)
+			// Create the host path if it doesn't exist.
+			// TODO(random-liu): Add CRI validation test for this case.
+			if _, err := osi.Stat(src); err != nil {
+				if !os.IsNotExist(err) {
+					return errors.Wrapf(err, "failed to stat %q", src)
+				}
+				if err := osi.MkdirAll(src, 0755); err != nil {
+					return errors.Wrapf(err, "failed to mkdir %q", src)
+				}
+			}
+			// TODO(random-liu): Add cri-containerd integration test or cri validation test
+			// for this.
+			src, err := osi.ResolveSymbolicLink(src)
+			if err != nil {
+				return errors.Wrapf(err, "failed to resolve symlink %q", src)
+			}
+			if s.Linux == nil {
+				s.Linux = &runtimespec.Linux{}
+			}
+			options := []string{"rbind"}
+			switch mount.GetPropagation() {
+			case runtime.MountPropagation_PROPAGATION_PRIVATE:
+				options = append(options, "rprivate")
+				// Since default root propogation in runc is rprivate ignore
+				// setting the root propagation
+			case runtime.MountPropagation_PROPAGATION_BIDIRECTIONAL:
+				if err := ensureShared(src, osi.LookupMount); err != nil {
+					return err
+				}
+				options = append(options, "rshared")
+				s.Linux.RootfsPropagation = "rshared"
+			case runtime.MountPropagation_PROPAGATION_HOST_TO_CONTAINER:
+				if err := ensureSharedOrSlave(src, osi.LookupMount); err != nil {
+					return err
+				}
+				options = append(options, "rslave")
+				if s.Linux.RootfsPropagation != "rshared" &&
+					s.Linux.RootfsPropagation != "rslave" {
+					s.Linux.RootfsPropagation = "rslave"
+				}
+			default:
+				logrus.Warnf("Unknown propagation mode for hostPath %q", mount.HostPath)
+				options = append(options, "rprivate")
+			}
+
+			// NOTE(random-liu): we don't change all mounts to `ro` when root filesystem
+			// is readonly. This is different from docker's behavior, but make more sense.
+			if mount.GetReadonly() {
+				options = append(options, "ro")
+			} else {
+				options = append(options, "rw")
+			}
+
+			if mount.GetSelinuxRelabel() {
+				if err := label.Relabel(src, mountLabel, true); err != nil && err != unix.ENOTSUP {
+					return errors.Wrapf(err, "relabel %q with %q failed", src, mountLabel)
+				}
+			}
+			s.Mounts = append(s.Mounts, runtimespec.Mount{
+				Source:      src,
+				Destination: dst,
+				Type:        "bind",
+				Options:     options,
+			})
+		}
+		return nil
 	}
-	return append(gids1, gids2...)
+}
|
|
||||||
|
// mounts defines how to sort runtime.Mount.
|
||||||
|
// This is the same with the Docker implementation:
|
||||||
|
// https://github.com/moby/moby/blob/17.05.x/daemon/volumes.go#L26
|
||||||
|
type orderedMounts []*runtime.Mount
|
||||||
|
|
||||||
|
// Len returns the number of mounts. Used in sorting.
|
||||||
|
func (m orderedMounts) Len() int {
|
||||||
|
return len(m)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Less returns true if the number of parts (a/b/c would be 3 parts) in the
|
||||||
|
// mount indexed by parameter 1 is less than that of the mount indexed by
|
||||||
|
// parameter 2. Used in sorting.
|
||||||
|
func (m orderedMounts) Less(i, j int) bool {
|
||||||
|
return m.parts(i) < m.parts(j)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Swap swaps two items in an array of mounts. Used in sorting
|
||||||
|
func (m orderedMounts) Swap(i, j int) {
|
||||||
|
m[i], m[j] = m[j], m[i]
|
||||||
|
}
|
||||||
|
|
||||||
|
// parts returns the number of parts in the destination of a mount. Used in sorting.
|
||||||
|
func (m orderedMounts) parts(i int) int {
|
||||||
|
return strings.Count(filepath.Clean(m[i].ContainerPath), string(os.PathSeparator))
|
||||||
|
}
|
||||||
|
|
||||||
|
// Ensure mount point on which path is mounted, is shared.
|
||||||
|
func ensureShared(path string, lookupMount func(string) (mount.Info, error)) error {
|
||||||
|
mountInfo, err := lookupMount(path)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
// Make sure source mount point is shared.
|
||||||
|
optsSplit := strings.Split(mountInfo.Optional, " ")
|
||||||
|
for _, opt := range optsSplit {
|
||||||
|
if strings.HasPrefix(opt, "shared:") {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return errors.Errorf("path %q is mounted on %q but it is not a shared mount", path, mountInfo.Mountpoint)
|
||||||
|
}
|
||||||
|
|
||||||
|
// ensure mount point on which path is mounted, is either shared or slave.
|
||||||
|
func ensureSharedOrSlave(path string, lookupMount func(string) (mount.Info, error)) error {
|
||||||
|
mountInfo, err := lookupMount(path)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
// Make sure source mount point is shared.
|
||||||
|
optsSplit := strings.Split(mountInfo.Optional, " ")
|
||||||
|
for _, opt := range optsSplit {
|
||||||
|
if strings.HasPrefix(opt, "shared:") {
|
||||||
|
return nil
|
||||||
|
} else if strings.HasPrefix(opt, "master:") {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return errors.Errorf("path %q is mounted on %q but it is not a shared or slave mount", path, mountInfo.Mountpoint)
|
||||||
|
}
|
||||||
|
|
||||||
|
// WithPrivilegedDevices allows all host devices inside the container
|
||||||
|
func WithPrivilegedDevices(_ context.Context, _ oci.Client, _ *containers.Container, s *runtimespec.Spec) error {
|
||||||
|
if s.Linux == nil {
|
||||||
|
s.Linux = &runtimespec.Linux{}
|
||||||
|
}
|
||||||
|
if s.Linux.Resources == nil {
|
||||||
|
s.Linux.Resources = &runtimespec.LinuxResources{}
|
||||||
|
}
|
||||||
|
hostDevices, err := devices.HostDevices()
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
for _, hostDevice := range hostDevices {
|
||||||
|
rd := runtimespec.LinuxDevice{
|
||||||
|
Path: hostDevice.Path,
|
||||||
|
Type: string(hostDevice.Type),
|
||||||
|
Major: hostDevice.Major,
|
||||||
|
Minor: hostDevice.Minor,
|
||||||
|
UID: &hostDevice.Uid,
|
||||||
|
GID: &hostDevice.Gid,
|
||||||
|
}
|
||||||
|
if hostDevice.Major == 0 && hostDevice.Minor == 0 {
|
||||||
|
// Invalid device, most likely a symbolic link, skip it.
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
addDevice(s, rd)
|
||||||
|
}
|
||||||
|
s.Linux.Resources.Devices = []runtimespec.LinuxDeviceCgroup{
|
||||||
|
{
|
||||||
|
Allow: true,
|
||||||
|
Access: "rwm",
|
||||||
|
},
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func addDevice(s *runtimespec.Spec, rd runtimespec.LinuxDevice) {
|
||||||
|
for i, dev := range s.Linux.Devices {
|
||||||
|
if dev.Path == rd.Path {
|
||||||
|
s.Linux.Devices[i] = rd
|
||||||
|
return
|
||||||
|
}
|
||||||
|
}
|
||||||
|
s.Linux.Devices = append(s.Linux.Devices, rd)
|
||||||
|
}
|
||||||
|
|
||||||
|
// WithDevices sets the provided devices onto the container spec
|
||||||
|
func WithDevices(osi osinterface.OS, config *runtime.ContainerConfig) oci.SpecOpts {
|
||||||
|
return func(ctx context.Context, client oci.Client, c *containers.Container, s *runtimespec.Spec) (err error) {
|
||||||
|
if s.Linux == nil {
|
||||||
|
s.Linux = &runtimespec.Linux{}
|
||||||
|
}
|
||||||
|
if s.Linux.Resources == nil {
|
||||||
|
s.Linux.Resources = &runtimespec.LinuxResources{}
|
||||||
|
}
|
||||||
|
for _, device := range config.GetDevices() {
|
||||||
|
path, err := osi.ResolveSymbolicLink(device.HostPath)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
dev, err := devices.DeviceFromPath(path, device.Permissions)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
rd := runtimespec.LinuxDevice{
|
||||||
|
Path: device.ContainerPath,
|
||||||
|
Type: string(dev.Type),
|
||||||
|
Major: dev.Major,
|
||||||
|
Minor: dev.Minor,
|
||||||
|
UID: &dev.Uid,
|
||||||
|
GID: &dev.Gid,
|
||||||
|
}
|
||||||
|
|
||||||
|
addDevice(s, rd)
|
||||||
|
|
||||||
|
s.Linux.Resources.Devices = append(s.Linux.Resources.Devices, runtimespec.LinuxDeviceCgroup{
|
||||||
|
Allow: true,
|
||||||
|
Type: string(dev.Type),
|
||||||
|
Major: &dev.Major,
|
||||||
|
Minor: &dev.Minor,
|
||||||
|
Access: dev.Permissions,
|
||||||
|
})
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// WithCapabilities sets the provided capabilties from the security context
|
||||||
|
func WithCapabilities(sc *runtime.LinuxContainerSecurityContext) oci.SpecOpts {
|
||||||
|
capabilities := sc.GetCapabilities()
|
||||||
|
if capabilities == nil {
|
||||||
|
return nullOpt
|
||||||
|
}
|
||||||
|
|
||||||
|
var opts []oci.SpecOpts
|
||||||
|
// Add/drop all capabilities if "all" is specified, so that
|
||||||
|
// following individual add/drop could still work. E.g.
|
||||||
|
// AddCapabilities: []string{"ALL"}, DropCapabilities: []string{"CHOWN"}
|
||||||
|
// will be all capabilities without `CAP_CHOWN`.
|
||||||
|
if util.InStringSlice(capabilities.GetAddCapabilities(), "ALL") {
|
||||||
|
opts = append(opts, oci.WithAllCapabilities)
|
||||||
|
}
|
||||||
|
if util.InStringSlice(capabilities.GetDropCapabilities(), "ALL") {
|
||||||
|
opts = append(opts, oci.WithCapabilities(nil))
|
||||||
|
}
|
||||||
|
|
||||||
|
var caps []string
|
||||||
|
for _, c := range capabilities.GetAddCapabilities() {
|
||||||
|
if strings.ToUpper(c) == "ALL" {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
// Capabilities in CRI doesn't have `CAP_` prefix, so add it.
|
||||||
|
caps = append(caps, "CAP_"+strings.ToUpper(c))
|
||||||
|
}
|
||||||
|
opts = append(opts, oci.WithAddedCapabilities(caps))
|
||||||
|
|
||||||
|
caps = []string{}
|
||||||
|
for _, c := range capabilities.GetDropCapabilities() {
|
||||||
|
if strings.ToUpper(c) == "ALL" {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
caps = append(caps, "CAP_"+strings.ToUpper(c))
|
||||||
|
}
|
||||||
|
opts = append(opts, oci.WithDroppedCapabilities(caps))
|
||||||
|
return oci.Compose(opts...)
|
||||||
|
}
|
||||||
|
|
||||||
|
// WithoutAmbientCaps removes the ambient caps from the spec
|
||||||
|
func WithoutAmbientCaps(_ context.Context, _ oci.Client, c *containers.Container, s *runtimespec.Spec) error {
|
||||||
|
if s.Process == nil {
|
||||||
|
s.Process = &runtimespec.Process{}
|
||||||
|
}
|
||||||
|
if s.Process.Capabilities == nil {
|
||||||
|
s.Process.Capabilities = &runtimespec.LinuxCapabilities{}
|
||||||
|
}
|
||||||
|
s.Process.Capabilities.Ambient = nil
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// WithDisabledCgroups clears the Cgroups Path from the spec
|
||||||
|
func WithDisabledCgroups(_ context.Context, _ oci.Client, c *containers.Container, s *runtimespec.Spec) error {
|
||||||
|
if s.Linux == nil {
|
||||||
|
s.Linux = &runtimespec.Linux{}
|
||||||
|
}
|
||||||
|
s.Linux.CgroupsPath = ""
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// WithSelinuxLabels sets the mount and process labels
|
||||||
|
func WithSelinuxLabels(process, mount string) oci.SpecOpts {
|
||||||
|
return func(ctx context.Context, client oci.Client, c *containers.Container, s *runtimespec.Spec) (err error) {
|
||||||
|
if s.Linux == nil {
|
||||||
|
s.Linux = &runtimespec.Linux{}
|
||||||
|
}
|
||||||
|
if s.Process == nil {
|
||||||
|
s.Process = &runtimespec.Process{}
|
||||||
|
}
|
||||||
|
s.Linux.MountLabel = mount
|
||||||
|
s.Process.SelinuxLabel = process
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// WithResources sets the provided resource restrictions
|
||||||
|
func WithResources(resources *runtime.LinuxContainerResources) oci.SpecOpts {
|
||||||
|
return func(ctx context.Context, client oci.Client, c *containers.Container, s *runtimespec.Spec) (err error) {
|
||||||
|
if resources == nil {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
if s.Linux == nil {
|
||||||
|
s.Linux = &runtimespec.Linux{}
|
||||||
|
}
|
||||||
|
if s.Linux.Resources == nil {
|
||||||
|
s.Linux.Resources = &runtimespec.LinuxResources{}
|
||||||
|
}
|
||||||
|
if s.Linux.Resources.CPU == nil {
|
||||||
|
s.Linux.Resources.CPU = &runtimespec.LinuxCPU{}
|
||||||
|
}
|
||||||
|
if s.Linux.Resources.Memory == nil {
|
||||||
|
s.Linux.Resources.Memory = &runtimespec.LinuxMemory{}
|
||||||
|
}
|
||||||
|
var (
|
||||||
|
p = uint64(resources.GetCpuPeriod())
|
||||||
|
q = resources.GetCpuQuota()
|
||||||
|
shares = uint64(resources.GetCpuShares())
|
||||||
|
limit = resources.GetMemoryLimitInBytes()
|
||||||
|
)
|
||||||
|
|
||||||
|
if p != 0 {
|
||||||
|
s.Linux.Resources.CPU.Period = &p
|
||||||
|
}
|
||||||
|
if q != 0 {
|
||||||
|
s.Linux.Resources.CPU.Quota = &q
|
||||||
|
}
|
||||||
|
if shares != 0 {
|
||||||
|
s.Linux.Resources.CPU.Shares = &shares
|
||||||
|
}
|
||||||
|
if cpus := resources.GetCpusetCpus(); cpus != "" {
|
||||||
|
s.Linux.Resources.CPU.Cpus = cpus
|
||||||
|
}
|
||||||
|
if mems := resources.GetCpusetMems(); mems != "" {
|
||||||
|
s.Linux.Resources.CPU.Mems = resources.GetCpusetMems()
|
||||||
|
}
|
||||||
|
if limit != 0 {
|
||||||
|
s.Linux.Resources.Memory.Limit = &limit
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// WithOOMScoreAdj sets the oom score
|
||||||
|
func WithOOMScoreAdj(config *runtime.ContainerConfig, restrict bool) oci.SpecOpts {
|
||||||
|
return func(ctx context.Context, client oci.Client, c *containers.Container, s *runtimespec.Spec) error {
|
||||||
|
if s.Process == nil {
|
||||||
|
s.Process = &runtimespec.Process{}
|
||||||
|
}
|
||||||
|
|
||||||
|
resources := config.GetLinux().GetResources()
|
||||||
|
if resources == nil {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
adj := int(resources.GetOomScoreAdj())
|
||||||
|
if restrict {
|
||||||
|
var err error
|
||||||
|
adj, err = restrictOOMScoreAdj(adj)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
s.Process.OOMScoreAdj = &adj
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// WithSysctls sets the provided sysctls onto the spec
|
||||||
|
func WithSysctls(sysctls map[string]string) oci.SpecOpts {
|
||||||
|
return func(ctx context.Context, client oci.Client, c *containers.Container, s *runtimespec.Spec) error {
|
||||||
|
if s.Linux == nil {
|
||||||
|
s.Linux = &runtimespec.Linux{}
|
||||||
|
}
|
||||||
|
if s.Linux.Sysctl == nil {
|
||||||
|
s.Linux.Sysctl = make(map[string]string)
|
||||||
|
}
|
||||||
|
for k, v := range sysctls {
|
||||||
|
s.Linux.Sysctl[k] = v
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// WithPodOOMScoreAdj sets the oom score for the pod sandbox
|
||||||
|
func WithPodOOMScoreAdj(adj int, restrict bool) oci.SpecOpts {
|
||||||
|
return func(ctx context.Context, client oci.Client, c *containers.Container, s *runtimespec.Spec) error {
|
||||||
|
if s.Process == nil {
|
||||||
|
s.Process = &runtimespec.Process{}
|
||||||
|
}
|
||||||
|
if restrict {
|
||||||
|
var err error
|
||||||
|
adj, err = restrictOOMScoreAdj(adj)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
s.Process.OOMScoreAdj = &adj
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// WithSupplementalGroups sets the supplemental groups for the process
|
||||||
|
func WithSupplementalGroups(groups []int64) oci.SpecOpts {
|
||||||
|
return func(ctx context.Context, client oci.Client, c *containers.Container, s *runtimespec.Spec) error {
|
||||||
|
if s.Process == nil {
|
||||||
|
s.Process = &runtimespec.Process{}
|
||||||
|
}
|
||||||
|
var guids []uint32
|
||||||
|
for _, g := range groups {
|
||||||
|
guids = append(guids, uint32(g))
|
||||||
|
}
|
||||||
|
s.Process.User.AdditionalGids = mergeGids(s.Process.User.AdditionalGids, guids)
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// WithAnnotation sets the provided annotation
|
||||||
|
func WithAnnotation(k, v string) oci.SpecOpts {
|
||||||
|
return func(ctx context.Context, client oci.Client, c *containers.Container, s *runtimespec.Spec) error {
|
||||||
|
if s.Annotations == nil {
|
||||||
|
s.Annotations = make(map[string]string)
|
||||||
|
}
|
||||||
|
s.Annotations[k] = v
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// WithPodNamespaces sets the pod namespaces for the container
|
||||||
|
func WithPodNamespaces(config *runtime.LinuxContainerSecurityContext, pid uint32) oci.SpecOpts {
|
||||||
|
namespaces := config.GetNamespaceOptions()
|
||||||
|
|
||||||
|
opts := []oci.SpecOpts{
|
||||||
|
oci.WithLinuxNamespace(runtimespec.LinuxNamespace{Type: runtimespec.NetworkNamespace, Path: GetNetworkNamespace(pid)}),
|
||||||
|
oci.WithLinuxNamespace(runtimespec.LinuxNamespace{Type: runtimespec.IPCNamespace, Path: GetIPCNamespace(pid)}),
|
||||||
|
oci.WithLinuxNamespace(runtimespec.LinuxNamespace{Type: runtimespec.UTSNamespace, Path: GetUTSNamespace(pid)}),
|
||||||
|
}
|
||||||
|
if namespaces.GetPid() != runtime.NamespaceMode_CONTAINER {
|
||||||
|
opts = append(opts, oci.WithLinuxNamespace(runtimespec.LinuxNamespace{Type: runtimespec.PIDNamespace, Path: GetPIDNamespace(pid)}))
|
||||||
|
}
|
||||||
|
return oci.Compose(opts...)
|
||||||
|
}
|
||||||
|
|
||||||
|
// WithDefaultSandboxShares sets the default sandbox CPU shares
|
||||||
|
func WithDefaultSandboxShares(ctx context.Context, client oci.Client, c *containers.Container, s *runtimespec.Spec) error {
|
||||||
|
if s.Linux == nil {
|
||||||
|
s.Linux = &runtimespec.Linux{}
|
||||||
|
}
|
||||||
|
if s.Linux.Resources == nil {
|
||||||
|
s.Linux.Resources = &runtimespec.LinuxResources{}
|
||||||
|
}
|
||||||
|
if s.Linux.Resources.CPU == nil {
|
||||||
|
s.Linux.Resources.CPU = &runtimespec.LinuxCPU{}
|
||||||
|
}
|
||||||
|
i := uint64(DefaultSandboxCPUshares)
|
||||||
|
s.Linux.Resources.CPU.Shares = &i
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// WithoutNamespace removes the provided namespace
|
||||||
|
func WithoutNamespace(t runtimespec.LinuxNamespaceType) oci.SpecOpts {
|
||||||
|
return func(ctx context.Context, client oci.Client, c *containers.Container, s *runtimespec.Spec) error {
|
||||||
|
if s.Linux == nil {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
var namespaces []runtimespec.LinuxNamespace
|
||||||
|
for i, ns := range s.Linux.Namespaces {
|
||||||
|
if ns.Type != t {
|
||||||
|
namespaces = append(namespaces, s.Linux.Namespaces[i])
|
||||||
|
}
|
||||||
|
}
|
||||||
|
s.Linux.Namespaces = namespaces
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func nullOpt(_ context.Context, _ oci.Client, _ *containers.Container, _ *runtimespec.Spec) error {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func getCurrentOOMScoreAdj() (int, error) {
|
||||||
|
b, err := ioutil.ReadFile("/proc/self/oom_score_adj")
|
||||||
|
if err != nil {
|
||||||
|
return 0, errors.Wrap(err, "could not get the daemon oom_score_adj")
|
||||||
|
}
|
||||||
|
s := strings.TrimSpace(string(b))
|
||||||
|
i, err := strconv.Atoi(s)
|
||||||
|
if err != nil {
|
||||||
|
return 0, errors.Wrap(err, "could not get the daemon oom_score_adj")
|
||||||
|
}
|
||||||
|
return i, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func restrictOOMScoreAdj(preferredOOMScoreAdj int) (int, error) {
|
||||||
|
currentOOMScoreAdj, err := getCurrentOOMScoreAdj()
|
||||||
|
if err != nil {
|
||||||
|
return preferredOOMScoreAdj, err
|
||||||
|
}
|
||||||
|
if preferredOOMScoreAdj < currentOOMScoreAdj {
|
||||||
|
return currentOOMScoreAdj, nil
|
||||||
|
}
|
||||||
|
return preferredOOMScoreAdj, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
const (
|
||||||
|
// netNSFormat is the format of network namespace of a process.
|
||||||
|
netNSFormat = "/proc/%v/ns/net"
|
||||||
|
// ipcNSFormat is the format of ipc namespace of a process.
|
||||||
|
ipcNSFormat = "/proc/%v/ns/ipc"
|
||||||
|
// utsNSFormat is the format of uts namespace of a process.
|
||||||
|
utsNSFormat = "/proc/%v/ns/uts"
|
||||||
|
// pidNSFormat is the format of pid namespace of a process.
|
||||||
|
pidNSFormat = "/proc/%v/ns/pid"
|
||||||
|
)
|
||||||
|
|
||||||
|
// GetNetworkNamespace returns the network namespace of a process.
|
||||||
|
func GetNetworkNamespace(pid uint32) string {
|
||||||
|
return fmt.Sprintf(netNSFormat, pid)
|
||||||
|
}
|
||||||
|
|
||||||
|
// GetIPCNamespace returns the ipc namespace of a process.
|
||||||
|
func GetIPCNamespace(pid uint32) string {
|
||||||
|
return fmt.Sprintf(ipcNSFormat, pid)
|
||||||
|
}
|
||||||
|
|
||||||
|
// GetUTSNamespace returns the uts namespace of a process.
|
||||||
|
func GetUTSNamespace(pid uint32) string {
|
||||||
|
return fmt.Sprintf(utsNSFormat, pid)
|
||||||
|
}
|
||||||
|
|
||||||
|
// GetPIDNamespace returns the pid namespace of a process.
|
||||||
|
func GetPIDNamespace(pid uint32) string {
|
||||||
|
return fmt.Sprintf(pidNSFormat, pid)
|
||||||
}
|
}
|
||||||
|
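Editor's note: the helpers above are plain oci.SpecOpts, so they compose with containerd's spec generation exactly as the new runtimeSpec helper later in this commit does. A minimal sketch of that composition follows; the customopts import path, container ID and annotation key/value are assumptions made for the example, not something this diff pins down.

package main

import (
    "context"
    "fmt"

    "github.com/containerd/containerd/containers"
    "github.com/containerd/containerd/namespaces"
    "github.com/containerd/containerd/oci"

    customopts "github.com/containerd/cri/pkg/containerd/opts" // assumed import path for the opts package above
)

func main() {
    // oci.GenerateSpec needs a containerd namespace on the context,
    // as the runtimeSpec helper in this commit notes.
    ctx := namespaces.WithNamespace(context.Background(), "k8s.io")

    specOpts := []oci.SpecOpts{
        customopts.WithDisabledCgroups,
        customopts.WithoutAmbientCaps,
        customopts.WithAnnotation("example.annotation/key", "value"), // hypothetical key/value
    }

    // GenerateSpec builds containerd's default spec and applies each option in order.
    spec, err := oci.GenerateSpec(ctx, nil, &containers.Container{ID: "example"}, specOpts...)
    if err != nil {
        panic(err)
    }
    fmt.Println(spec.Annotations)
}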
634  vendor/github.com/containerd/cri/pkg/server/container_create.go  generated  vendored
@@ -17,9 +17,7 @@ limitations under the License.
 package server
 
 import (
-    "os"
     "path/filepath"
-    "sort"
     "strconv"
     "strings"
     "time"
@@ -28,21 +26,14 @@ import (
     "github.com/containerd/containerd/containers"
     "github.com/containerd/containerd/contrib/apparmor"
     "github.com/containerd/containerd/contrib/seccomp"
-    "github.com/containerd/containerd/mount"
     "github.com/containerd/containerd/oci"
     "github.com/containerd/typeurl"
     "github.com/davecgh/go-spew/spew"
     imagespec "github.com/opencontainers/image-spec/specs-go/v1"
-    "github.com/opencontainers/runc/libcontainer/devices"
     runtimespec "github.com/opencontainers/runtime-spec/specs-go"
-    "github.com/opencontainers/runtime-tools/generate"
-    "github.com/opencontainers/runtime-tools/validate"
-    "github.com/opencontainers/selinux/go-selinux/label"
     "github.com/pkg/errors"
     "github.com/sirupsen/logrus"
-    "github.com/syndtr/gocapability/capability"
     "golang.org/x/net/context"
-    "golang.org/x/sys/unix"
     runtime "k8s.io/kubernetes/pkg/kubelet/apis/cri/runtime/v1alpha2"
 
     "github.com/containerd/cri/pkg/annotations"
@@ -76,6 +67,7 @@ func init() {
 // CreateContainer creates a new container in the given PodSandbox.
 func (c *criService) CreateContainer(ctx context.Context, r *runtime.CreateContainerRequest) (_ *runtime.CreateContainerResponse, retErr error) {
     config := r.GetConfig()
+    logrus.Debugf("Container config %+v", config)
     sandboxConfig := r.GetSandboxConfig()
     sandbox, err := c.sandboxStore.Get(r.GetPodSandboxId())
     if err != nil {
@@ -165,7 +157,14 @@ func (c *criService) CreateContainer(ctx context.Context, r *runtime.CreateConta
     // Generate container runtime spec.
     mounts := c.generateContainerMounts(sandboxID, config)
 
-    spec, err := c.generateContainerSpec(id, sandboxID, sandboxPid, config, sandboxConfig, &image.ImageSpec.Config, append(mounts, volumeMounts...))
+    ociRuntime, err := c.getSandboxRuntime(sandboxConfig, sandbox.Metadata.RuntimeHandler)
+    if err != nil {
+        return nil, errors.Wrap(err, "failed to get sandbox runtime")
+    }
+    logrus.Debugf("Use OCI runtime %+v for sandbox %q and container %q", ociRuntime, sandboxID, id)
+
+    spec, err := c.generateContainerSpec(id, sandboxID, sandboxPid, config, sandboxConfig,
+        &image.ImageSpec.Config, append(mounts, volumeMounts...), ociRuntime.PodAnnotations)
     if err != nil {
         return nil, errors.Wrapf(err, "failed to generate container %q spec", id)
     }
@@ -186,7 +185,7 @@ func (c *criService) CreateContainer(ctx context.Context, r *runtime.CreateConta
     if len(volumeMounts) > 0 {
         mountMap := make(map[string]string)
         for _, v := range volumeMounts {
-            mountMap[v.HostPath] = v.ContainerPath
+            mountMap[filepath.Clean(v.HostPath)] = v.ContainerPath
         }
         opts = append(opts, customopts.WithVolumes(mountMap))
     }
@@ -195,7 +194,7 @@ func (c *criService) CreateContainer(ctx context.Context, r *runtime.CreateConta
 
     // Get container log path.
     if config.GetLogPath() != "" {
-        meta.LogPath = filepath.Join(sandbox.Config.GetLogDirectory(), config.GetLogPath())
+        meta.LogPath = filepath.Join(sandboxConfig.GetLogDirectory(), config.GetLogPath())
     }
 
     containerIO, err := cio.NewContainerIO(id,
@@ -310,52 +309,45 @@ func (c *criService) CreateContainer(ctx context.Context, r *runtime.CreateConta
 }
 
 func (c *criService) generateContainerSpec(id string, sandboxID string, sandboxPid uint32, config *runtime.ContainerConfig,
-    sandboxConfig *runtime.PodSandboxConfig, imageConfig *imagespec.ImageConfig, extraMounts []*runtime.Mount) (*runtimespec.Spec, error) {
-    // Creates a spec Generator with the default spec.
-    spec, err := defaultRuntimeSpec(id)
-    if err != nil {
-        return nil, err
+    sandboxConfig *runtime.PodSandboxConfig, imageConfig *imagespec.ImageConfig, extraMounts []*runtime.Mount, runtimePodAnnotations []string) (*runtimespec.Spec, error) {
+    specOpts := []oci.SpecOpts{
+        customopts.WithoutRunMount,
+        customopts.WithoutDefaultSecuritySettings,
+        customopts.WithRelativeRoot(relativeRootfsPath),
+        customopts.WithProcessArgs(config, imageConfig),
+        // this will be set based on the security context below
+        oci.WithNewPrivileges,
     }
-    g := newSpecGenerator(spec)
 
-    // Set the relative path to the rootfs of the container from containerd's
-    // pre-defined directory.
-    g.SetRootPath(relativeRootfsPath)
-
-    if err := setOCIProcessArgs(&g, config, imageConfig); err != nil {
-        return nil, err
-    }
-
     if config.GetWorkingDir() != "" {
-        g.SetProcessCwd(config.GetWorkingDir())
+        specOpts = append(specOpts, oci.WithProcessCwd(config.GetWorkingDir()))
     } else if imageConfig.WorkingDir != "" {
-        g.SetProcessCwd(imageConfig.WorkingDir)
+        specOpts = append(specOpts, oci.WithProcessCwd(imageConfig.WorkingDir))
     }
 
-    g.SetProcessTerminal(config.GetTty())
     if config.GetTty() {
-        g.AddProcessEnv("TERM", "xterm")
+        specOpts = append(specOpts, oci.WithTTY)
     }
 
     // Add HOSTNAME env.
-    hostname := sandboxConfig.GetHostname()
-    if sandboxConfig.GetLinux().GetSecurityContext().GetNamespaceOptions().GetNetwork() == runtime.NamespaceMode_NODE &&
-        hostname == "" {
-        hostname, err = c.os.Hostname()
-        if err != nil {
+    var (
+        err      error
+        hostname = sandboxConfig.GetHostname()
+    )
+    if hostname == "" {
+        if hostname, err = c.os.Hostname(); err != nil {
             return nil, err
         }
     }
-    g.AddProcessEnv(hostnameEnv, hostname)
+    specOpts = append(specOpts, oci.WithEnv([]string{hostnameEnv + "=" + hostname}))
 
     // Apply envs from image config first, so that envs from container config
     // can override them.
-    if err := addImageEnvs(&g, imageConfig.Env); err != nil {
-        return nil, err
-    }
+    env := imageConfig.Env
     for _, e := range config.GetEnvs() {
-        g.AddProcessEnv(e.GetKey(), e.GetValue())
+        env = append(env, e.GetKey()+"="+e.GetValue())
     }
+    specOpts = append(specOpts, oci.WithEnv(env))
 
     securityContext := config.GetLinux().GetSecurityContext()
     selinuxOpt := securityContext.GetSelinuxOptions()
@@ -363,90 +355,78 @@ func (c *criService) generateContainerSpec(id string, sandboxID string, sandboxP
     if err != nil {
         return nil, errors.Wrapf(err, "failed to init selinux options %+v", securityContext.GetSelinuxOptions())
     }
-    // Merge extra mounts and CRI mounts.
-    mounts := mergeMounts(config.GetMounts(), extraMounts)
-    if err := c.addOCIBindMounts(&g, mounts, mountLabel); err != nil {
-        return nil, errors.Wrapf(err, "failed to set OCI bind mounts %+v", mounts)
-    }
+    specOpts = append(specOpts, customopts.WithMounts(c.os, config, extraMounts, mountLabel))
 
     // Apply masked paths if specified.
     // When `MaskedPaths` is not specified, keep runtime default for backward compatibility;
     // When `MaskedPaths` is specified, but length is zero, clear masked path list.
-    if securityContext.GetMaskedPaths() != nil {
-        g.Config.Linux.MaskedPaths = nil
-        for _, path := range securityContext.GetMaskedPaths() {
-            g.AddLinuxMaskedPaths(path)
-        }
+    // Note: If the container is privileged, then we clear any masked paths later on in the call to setOCIPrivileged()
+    if maskedPaths := securityContext.GetMaskedPaths(); maskedPaths != nil {
+        specOpts = append(specOpts, oci.WithMaskedPaths(maskedPaths))
     }
 
     // Apply readonly paths if specified.
-    if securityContext.GetReadonlyPaths() != nil {
-        g.Config.Linux.ReadonlyPaths = nil
-        for _, path := range securityContext.GetReadonlyPaths() {
-            g.AddLinuxReadonlyPaths(path)
-        }
+    // Note: If the container is privileged, then we clear any readonly paths later on in the call to setOCIPrivileged()
+    if roPaths := securityContext.GetReadonlyPaths(); roPaths != nil {
+        specOpts = append(specOpts, oci.WithReadonlyPaths(roPaths))
     }
 
     if securityContext.GetPrivileged() {
         if !sandboxConfig.GetLinux().GetSecurityContext().GetPrivileged() {
             return nil, errors.New("no privileged container allowed in sandbox")
         }
-        if err := setOCIPrivileged(&g, config); err != nil {
-            return nil, err
-        }
+        specOpts = append(specOpts, oci.WithPrivileged, customopts.WithPrivilegedDevices)
     } else { // not privileged
-        if err := c.addOCIDevices(&g, config.GetDevices()); err != nil {
-            return nil, errors.Wrapf(err, "failed to set devices mapping %+v", config.GetDevices())
-        }
-
-        if err := setOCICapabilities(&g, securityContext.GetCapabilities()); err != nil {
-            return nil, errors.Wrapf(err, "failed to set capabilities %+v",
-                securityContext.GetCapabilities())
-        }
+        specOpts = append(specOpts, customopts.WithDevices(c.os, config), customopts.WithCapabilities(securityContext))
     }
+
     // Clear all ambient capabilities. The implication of non-root + caps
     // is not clearly defined in Kubernetes.
     // See https://github.com/kubernetes/kubernetes/issues/56374
     // Keep docker's behavior for now.
-    g.Config.Process.Capabilities.Ambient = []string{}
-
-    g.SetProcessSelinuxLabel(processLabel)
-    g.SetLinuxMountLabel(mountLabel)
+    specOpts = append(specOpts,
+        customopts.WithoutAmbientCaps,
+        customopts.WithSelinuxLabels(processLabel, mountLabel),
+    )
 
     // TODO: Figure out whether we should set no new privilege for sandbox container by default
-    g.SetProcessNoNewPrivileges(securityContext.GetNoNewPrivs())
+    if securityContext.GetNoNewPrivs() {
+        specOpts = append(specOpts, oci.WithNoNewPrivileges)
+    }
     // TODO(random-liu): [P1] Set selinux options (privileged or not).
-    g.SetRootReadonly(securityContext.GetReadonlyRootfs())
+    if securityContext.GetReadonlyRootfs() {
+        specOpts = append(specOpts, oci.WithRootFSReadonly())
+    }
 
     if c.config.DisableCgroup {
-        g.SetLinuxCgroupsPath("")
+        specOpts = append(specOpts, customopts.WithDisabledCgroups)
     } else {
-        setOCILinuxResourceCgroup(&g, config.GetLinux().GetResources())
+        specOpts = append(specOpts, customopts.WithResources(config.GetLinux().GetResources()))
         if sandboxConfig.GetLinux().GetCgroupParent() != "" {
             cgroupsPath := getCgroupsPath(sandboxConfig.GetLinux().GetCgroupParent(), id,
                 c.config.SystemdCgroup)
-            g.SetLinuxCgroupsPath(cgroupsPath)
+            specOpts = append(specOpts, oci.WithCgroup(cgroupsPath))
         }
     }
-    if err := setOCILinuxResourceOOMScoreAdj(&g, config.GetLinux().GetResources(), c.config.RestrictOOMScoreAdj); err != nil {
-        return nil, err
-    }
-
-    // Set namespaces, share namespace with sandbox container.
-    setOCINamespaces(&g, securityContext.GetNamespaceOptions(), sandboxPid)
 
     supplementalGroups := securityContext.GetSupplementalGroups()
-    for _, group := range supplementalGroups {
-        g.AddProcessAdditionalGid(uint32(group))
+    for pKey, pValue := range getPassthroughAnnotations(sandboxConfig.Annotations,
+        runtimePodAnnotations) {
+        specOpts = append(specOpts, customopts.WithAnnotation(pKey, pValue))
     }
 
-    g.AddAnnotation(annotations.ContainerType, annotations.ContainerTypeContainer)
-    g.AddAnnotation(annotations.SandboxID, sandboxID)
+    specOpts = append(specOpts,
+        customopts.WithOOMScoreAdj(config, c.config.RestrictOOMScoreAdj),
+        customopts.WithPodNamespaces(securityContext, sandboxPid),
+        customopts.WithSupplementalGroups(supplementalGroups),
+        customopts.WithAnnotation(annotations.ContainerType, annotations.ContainerTypeContainer),
+        customopts.WithAnnotation(annotations.SandboxID, sandboxID),
+    )
 
-    return g.Config, nil
+    return runtimeSpec(id, specOpts...)
 }
 
 // generateVolumeMounts sets up image volumes for container. Rely on the removal of container
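Editor's note: the rewritten generateContainerSpec above forwards pod annotations selected by the runtime handler's PodAnnotations list through getPassthroughAnnotations, whose body is not part of this hunk. A hypothetical sketch of such a filter, assuming plain exact-key matching, is shown below.

package example

// getPassthroughAnnotations (hypothetical reconstruction): copy only the pod
// annotations whose keys the runtime handler explicitly allows. Exact-key
// matching is an assumption; the vendored implementation is not shown here.
func getPassthroughAnnotations(podAnnotations map[string]string, runtimePodAnnotations []string) map[string]string {
    passthrough := make(map[string]string)
    for _, key := range runtimePodAnnotations {
        if value, ok := podAnnotations[key]; ok {
            passthrough[key] = value
        }
    }
    return passthrough
}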
@@ -483,6 +463,22 @@ func (c *criService) generateVolumeMounts(containerRootDir string, criMounts []*
 func (c *criService) generateContainerMounts(sandboxID string, config *runtime.ContainerConfig) []*runtime.Mount {
     var mounts []*runtime.Mount
     securityContext := config.GetLinux().GetSecurityContext()
+    if !isInCRIMounts(etcHostname, config.GetMounts()) {
+        // /etc/hostname is added since 1.1.6, 1.2.4 and 1.3.
+        // For in-place upgrade, the old sandbox doesn't have the hostname file,
+        // do not mount this in that case.
+        // TODO(random-liu): Remove the check and always mount this when
+        // containerd 1.1 and 1.2 are deprecated.
+        hostpath := c.getSandboxHostname(sandboxID)
+        if _, err := c.os.Stat(hostpath); err == nil {
+            mounts = append(mounts, &runtime.Mount{
+                ContainerPath: etcHostname,
+                HostPath:      hostpath,
+                Readonly:      securityContext.GetReadonlyRootfs(),
+            })
+        }
+    }
+
     if !isInCRIMounts(etcHosts, config.GetMounts()) {
         mounts = append(mounts, &runtime.Mount{
             ContainerPath: etcHosts,
@@ -515,410 +511,14 @@ func (c *criService) generateContainerMounts(sandboxID string, config *runtime.C
     return mounts
 }
 
-// setOCIProcessArgs sets process args. It returns error if the final arg list
-// is empty.
-[… roughly 370 deleted lines are elided here: the generate.Generator-based helpers setOCIProcessArgs, addImageEnvs, setOCIPrivileged, clearReadOnly, addOCIDevices, setOCIDevicesPrivileged, addOCIBindMounts, setOCIBindMountsPrivileged, setOCILinuxResourceCgroup, setOCILinuxResourceOOMScoreAdj, getOCICapabilitiesList, addProcessRootCapability, dropProcessRootCapability, setOCICapabilities and setOCINamespaces; their logic is replaced by the customopts.* and oci.* SpecOpts used in generateContainerSpec above …]
-
-// defaultRuntimeSpec returns a default runtime spec used in cri-containerd.
-func defaultRuntimeSpec(id string) (*runtimespec.Spec, error) {
+// runtimeSpec returns a default runtime spec used in cri-containerd.
+func runtimeSpec(id string, opts ...oci.SpecOpts) (*runtimespec.Spec, error) {
     // GenerateSpec needs namespace.
     ctx := ctrdutil.NamespacedContext()
-    spec, err := oci.GenerateSpec(ctx, nil, &containers.Container{ID: id})
+    spec, err := oci.GenerateSpec(ctx, nil, &containers.Container{ID: id}, opts...)
     if err != nil {
         return nil, err
     }
-
-    // Remove `/run` mount
-    // TODO(random-liu): Mount tmpfs for /run and handle copy-up.
-    var mounts []runtimespec.Mount
-    for _, mount := range spec.Mounts {
-        if mount.Destination == "/run" {
-            continue
-        }
-        mounts = append(mounts, mount)
-    }
-    spec.Mounts = mounts
-
-    // Make sure no default seccomp/apparmor is specified
-    if spec.Process != nil {
-        spec.Process.ApparmorProfile = ""
-    }
-    if spec.Linux != nil {
-        spec.Linux.Seccomp = nil
-    }
-
-    // Remove default rlimits (See issue #515)
-    spec.Process.Rlimits = nil
-
     return spec, nil
 }
 
@@ -987,42 +587,6 @@ func generateApparmorSpecOpts(apparmorProf string, privileged, apparmorEnabled b
     }
 }
 
-// Ensure mount point on which path is mounted, is shared.
-func ensureShared(path string, lookupMount func(string) (mount.Info, error)) error {
-    mountInfo, err := lookupMount(path)
-    if err != nil {
-        return err
-    }
-
-    // Make sure source mount point is shared.
-    optsSplit := strings.Split(mountInfo.Optional, " ")
-    for _, opt := range optsSplit {
-        if strings.HasPrefix(opt, "shared:") {
-            return nil
-        }
-    }
-
-    return errors.Errorf("path %q is mounted on %q but it is not a shared mount", path, mountInfo.Mountpoint)
-}
-
-// Ensure mount point on which path is mounted, is either shared or slave.
-func ensureSharedOrSlave(path string, lookupMount func(string) (mount.Info, error)) error {
-    mountInfo, err := lookupMount(path)
-    if err != nil {
-        return err
-    }
-    // Make sure source mount point is shared.
-    optsSplit := strings.Split(mountInfo.Optional, " ")
-    for _, opt := range optsSplit {
-        if strings.HasPrefix(opt, "shared:") {
-            return nil
-        } else if strings.HasPrefix(opt, "master:") {
-            return nil
-        }
-    }
-    return errors.Errorf("path %q is mounted on %q but it is not a shared or slave mount", path, mountInfo.Mountpoint)
-}
-
 // generateUserString generates valid user string based on OCI Image Spec v1.0.0.
 // TODO(random-liu): Add group name support in CRI.
 func generateUserString(username string, uid, gid *runtime.Int64Value) (string, error) {
@@ -1047,25 +611,3 @@ func generateUserString(username string, uid, gid *runtime.Int64Value) (string,
     }
     return userstr, nil
 }
-
-// mergeMounts merge CRI mounts with extra mounts. If a mount destination
-// is mounted by both a CRI mount and an extra mount, the CRI mount will
-// be kept.
-func mergeMounts(criMounts, extraMounts []*runtime.Mount) []*runtime.Mount {
-    var mounts []*runtime.Mount
-    mounts = append(mounts, criMounts...)
-    // Copy all mounts from extra mounts, except for mounts overriden by CRI.
-    for _, e := range extraMounts {
-        found := false
-        for _, c := range criMounts {
-            if filepath.Clean(e.ContainerPath) == filepath.Clean(c.ContainerPath) {
-                found = true
-                break
-            }
-        }
-        if !found {
-            mounts = append(mounts, e)
-        }
-    }
-    return mounts
-}
|
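Note: the removed mergeMounts helper above, and the isInCRIMounts change further down in helpers.go, both compare mount destinations only after filepath.Clean. A minimal, self-contained sketch of why the cleaned comparison matters (the paths here are purely illustrative):

```go
package main

import (
	"fmt"
	"path/filepath"
)

func main() {
	// Comparing cleaned paths avoids false mismatches caused by trailing
	// slashes or redundant separators in user-supplied mount destinations.
	fmt.Println(filepath.Clean("/etc/hosts/") == filepath.Clean("/etc//hosts")) // true
	fmt.Println("/etc/hosts/" == "/etc//hosts")                                 // false
}
```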
15	vendor/github.com/containerd/cri/pkg/server/container_execsync.go generated vendored
@@ -24,6 +24,7 @@ import (
 	"github.com/containerd/containerd"
 	containerdio "github.com/containerd/containerd/cio"
 	"github.com/containerd/containerd/errdefs"
+	"github.com/containerd/containerd/oci"
 	"github.com/pkg/errors"
 	"github.com/sirupsen/logrus"
 	"golang.org/x/net/context"
@@ -99,14 +100,16 @@ func (c *criService) execInContainer(ctx context.Context, id string, opts execOp
 	if err != nil {
 		return nil, errors.Wrap(err, "failed to load task")
 	}
-	if opts.tty {
-		g := newSpecGenerator(spec)
-		g.AddProcessEnv("TERM", "xterm")
-		spec = g.Config
-	}
 	pspec := spec.Process
-	pspec.Args = opts.cmd
 	pspec.Terminal = opts.tty
+	if opts.tty {
+		if err := oci.WithEnv([]string{"TERM=xterm"})(nil, nil, nil, spec); err != nil {
+			return nil, errors.Wrap(err, "add TERM env var to spec")
+		}
+	}
+
+	pspec.Args = opts.cmd
 
 	if opts.stdout == nil {
 		opts.stdout = cio.NewDiscardLogger()
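The exec path now mutates the loaded spec through containerd's oci.SpecOpts instead of a runtime-tools generator. A minimal sketch, assuming the containerd Go client is on the module path, of how oci.WithEnv is applied to an existing spec in the same way as the hunk above (this particular option ignores the context, client and container arguments, which is why nil is passed for them):

```go
package main

import (
	"fmt"

	"github.com/containerd/containerd/oci"
	runtimespec "github.com/opencontainers/runtime-spec/specs-go"
)

func main() {
	// Start from a spec that already has a process section, as the exec path does.
	spec := &runtimespec.Spec{
		Process: &runtimespec.Process{Env: []string{"PATH=/usr/bin"}},
	}

	// oci.WithEnv returns a SpecOpts that appends or replaces environment
	// variables on spec.Process in place.
	if err := oci.WithEnv([]string{"TERM=xterm"})(nil, nil, nil, spec); err != nil {
		panic(err)
	}

	fmt.Println(spec.Process.Env) // e.g. [PATH=/usr/bin TERM=xterm]
}
```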
7	vendor/github.com/containerd/cri/pkg/server/container_remove.go generated vendored
@@ -98,9 +98,12 @@ func (c *criService) RemoveContainer(ctx context.Context, r *runtime.RemoveConta
 // container will not be started or removed again.
 func setContainerRemoving(container containerstore.Container) error {
 	return container.Status.Update(func(status containerstore.Status) (containerstore.Status, error) {
-		// Do not remove container if it's still running.
+		// Do not remove container if it's still running or unknown.
 		if status.State() == runtime.ContainerState_CONTAINER_RUNNING {
-			return status, errors.New("container is still running")
+			return status, errors.New("container is still running, to stop first")
+		}
+		if status.State() == runtime.ContainerState_CONTAINER_UNKNOWN {
+			return status, errors.New("container state is unknown, to stop first")
 		}
 		if status.Removing {
 			return status, errors.New("container is already in removing state")
9	vendor/github.com/containerd/cri/pkg/server/container_status.go generated vendored
@@ -60,6 +60,15 @@ func (c *criService) ContainerStatus(ctx context.Context, r *runtime.ContainerSt
 		}
 	}
 	status := toCRIContainerStatus(container, spec, imageRef)
+	if status.GetCreatedAt() == 0 {
+		// CRI doesn't allow CreatedAt == 0.
+		info, err := container.Container.Info(ctx)
+		if err != nil {
+			return nil, errors.Wrapf(err, "failed to get CreatedAt in %q state", status.State)
+		}
+		status.CreatedAt = info.CreatedAt.UnixNano()
+	}
+
 	info, err := toCRIContainerInfo(ctx, container, r.GetVerbose())
 	if err != nil {
 		return nil, errors.Wrap(err, "failed to get verbose container info")
67	vendor/github.com/containerd/cri/pkg/server/container_stop.go generated vendored
@@ -19,8 +19,9 @@ package server
 import (
 	"time"
 
+	"github.com/containerd/containerd"
+	eventtypes "github.com/containerd/containerd/api/events"
 	"github.com/containerd/containerd/errdefs"
-	"github.com/docker/docker/pkg/signal"
 	"github.com/pkg/errors"
 	"github.com/sirupsen/logrus"
 	"golang.org/x/net/context"
@@ -60,8 +61,9 @@ func (c *criService) stopContainer(ctx context.Context, container containerstore
 	// Return without error if container is not running. This makes sure that
 	// stop only takes real action after the container is started.
 	state := container.Status.Get().State()
-	if state != runtime.ContainerState_CONTAINER_RUNNING {
-		logrus.Infof("Container to stop %q is not running, current state %q",
+	if state != runtime.ContainerState_CONTAINER_RUNNING &&
+		state != runtime.ContainerState_CONTAINER_UNKNOWN {
+		logrus.Infof("Container to stop %q must be in running or unknown state, current state %q",
 			id, criContainerStateToString(state))
 		return nil
 	}
@@ -69,10 +71,40 @@ func (c *criService) stopContainer(ctx context.Context, container containerstore
 	task, err := container.Container.Task(ctx, nil)
 	if err != nil {
 		if !errdefs.IsNotFound(err) {
-			return errors.Wrapf(err, "failed to stop container, task not found for container %q", id)
+			return errors.Wrapf(err, "failed to get task for container %q", id)
 		}
-		return nil
+		// Don't return for unknown state, some cleanup needs to be done.
+		if state != runtime.ContainerState_CONTAINER_UNKNOWN {
+			return nil
+		}
+		// Task is an interface, explicitly set it to nil just in case.
+		task = nil
 	}
+
+	// Handle unknown state.
+	if state == runtime.ContainerState_CONTAINER_UNKNOWN {
+		status, err := getTaskStatus(ctx, task)
+		if err != nil {
+			return errors.Wrapf(err, "failed to get task status for %q", id)
+		}
+		switch status.Status {
+		case containerd.Running, containerd.Created:
+			// The task is still running, continue stopping the task.
+		case containerd.Stopped:
+			// The task has exited. If the task exited after containerd
+			// started, the event monitor will receive its exit event; if it
+			// exited before containerd started, the event monitor will never
+			// receive its exit event.
+			// However, we can't tell that because the task state was not
+			// successfully loaded during containerd start (container is
+			// in UNKNOWN state).
+			// So always do cleanup here, just in case that we've missed the
+			// exit event.
+			return cleanupUnknownContainer(ctx, id, status, container)
+		default:
+			return errors.Wrapf(err, "unsupported task status %q", status.Status)
+		}
+	}
+
 	// We only need to kill the task. The event handler will Delete the
 	// task from containerd after it handles the Exited event.
@@ -101,7 +133,7 @@ func (c *criService) stopContainer(ctx context.Context, container containerstore
 			}
 		}
 	}
-	sig, err := signal.ParseSignal(stopSignal)
+	sig, err := containerd.ParseSignal(stopSignal)
 	if err != nil {
 		return errors.Wrapf(err, "failed to parse stop signal %q", stopSignal)
 	}
@@ -110,8 +142,9 @@ func (c *criService) stopContainer(ctx context.Context, container containerstore
 		return errors.Wrapf(err, "failed to stop container %q", id)
 	}
 
-	if err = c.waitContainerStop(ctx, container, timeout); err == nil {
-		return nil
+	if err = c.waitContainerStop(ctx, container, timeout); err == nil || errors.Cause(err) == ctx.Err() {
+		// Do not SIGKILL container if the context is cancelled.
+		return err
 	}
 	logrus.WithError(err).Errorf("An error occurs during waiting for container %q to be stopped", id)
 }
@@ -134,10 +167,28 @@ func (c *criService) waitContainerStop(ctx context.Context, container containers
 	defer timeoutTimer.Stop()
 	select {
 	case <-ctx.Done():
-		return errors.Errorf("wait container %q is cancelled", container.ID)
+		return errors.Wrapf(ctx.Err(), "wait container %q is cancelled", container.ID)
 	case <-timeoutTimer.C:
 		return errors.Errorf("wait container %q stop timeout", container.ID)
 	case <-container.Stopped():
 		return nil
 	}
 }
+
+// cleanupUnknownContainer cleanup stopped container in unknown state.
+func cleanupUnknownContainer(ctx context.Context, id string, status containerd.Status,
+	cntr containerstore.Container) error {
+	// Reuse handleContainerExit to do the cleanup.
+	// NOTE(random-liu): If the task did exit after containerd started, both
+	// the event monitor and the cleanup function would update the container
+	// state. The final container state will be whatever being updated first.
+	// There is no way to completely avoid this race condition, and for best
+	// effort unknown state container cleanup, this seems acceptable.
+	return handleContainerExit(ctx, &eventtypes.TaskExit{
+		ContainerID: id,
+		ID:          id,
+		Pid:         0,
+		ExitStatus:  status.ExitStatus,
+		ExitedAt:    status.ExitTime,
+	}, cntr)
+}
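waitContainerStop now wraps ctx.Err() so that stopContainer can tell request cancellation apart from a genuine stop timeout and skip the SIGKILL escalation. A small self-contained sketch of that check using github.com/pkg/errors (the container ID is made up):

```go
package main

import (
	"context"
	"fmt"

	"github.com/pkg/errors"
)

func main() {
	ctx, cancel := context.WithCancel(context.Background())
	cancel() // simulate the CRI request being cancelled

	// Wrap the context error the way waitContainerStop now does ...
	err := errors.Wrapf(ctx.Err(), "wait container %q is cancelled", "abc123")

	// ... so the caller can still recognise the cancellation and avoid SIGKILL.
	fmt.Println(errors.Cause(err) == ctx.Err()) // true
	fmt.Println(err)                            // wait container "abc123" is cancelled: context canceled
}
```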
27	vendor/github.com/containerd/cri/pkg/server/container_update_resources.go generated vendored
@@ -29,6 +29,7 @@ import (
 	"golang.org/x/net/context"
 	runtime "k8s.io/kubernetes/pkg/kubelet/apis/cri/runtime/v1alpha2"
 
+	"github.com/containerd/cri/pkg/containerd/opts"
 	ctrdutil "github.com/containerd/cri/pkg/containerd/util"
 	containerstore "github.com/containerd/cri/pkg/store/container"
 	"github.com/containerd/cri/pkg/util"
@@ -135,27 +136,11 @@ func updateOCILinuxResource(spec *runtimespec.Spec, new *runtime.LinuxContainerR
 	if err := util.DeepCopy(&cloned, spec); err != nil {
 		return nil, errors.Wrap(err, "failed to deep copy")
 	}
-	g := newSpecGenerator(&cloned)
-	if new.GetCpuPeriod() != 0 {
-		g.SetLinuxResourcesCPUPeriod(uint64(new.GetCpuPeriod()))
-	}
-	if new.GetCpuQuota() != 0 {
-		g.SetLinuxResourcesCPUQuota(new.GetCpuQuota())
-	}
-	if new.GetCpuShares() != 0 {
-		g.SetLinuxResourcesCPUShares(uint64(new.GetCpuShares()))
-	}
-	if new.GetMemoryLimitInBytes() != 0 {
-		g.SetLinuxResourcesMemoryLimit(new.GetMemoryLimitInBytes())
-	}
-	// OOMScore is not updatable.
-	if new.GetCpusetCpus() != "" {
-		g.SetLinuxResourcesCPUCpus(new.GetCpusetCpus())
-	}
-	if new.GetCpusetMems() != "" {
-		g.SetLinuxResourcesCPUMems(new.GetCpusetMems())
-	}
-
-	return g.Config, nil
+	if cloned.Linux == nil {
+		cloned.Linux = &runtimespec.Linux{}
+	}
+	if err := opts.WithResources(new)(nil, nil, nil, &cloned); err != nil {
+		return nil, errors.Wrap(err, "unable to set linux container resources")
+	}
+	return &cloned, nil
 }
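opts.WithResources is internal to the CRI plugin and not reproduced here; the sketch below is a hypothetical, much-reduced stand-in showing the same idea the hunk relies on: initialise the nested OCI spec structs before writing, and only touch fields that were explicitly requested.

```go
package main

import (
	"fmt"

	runtimespec "github.com/opencontainers/runtime-spec/specs-go"
)

// applyResources is an illustrative helper, not the vendored option.
func applyResources(spec *runtimespec.Spec, cpuQuota, memLimit int64) {
	if spec.Linux == nil {
		spec.Linux = &runtimespec.Linux{}
	}
	if spec.Linux.Resources == nil {
		spec.Linux.Resources = &runtimespec.LinuxResources{}
	}
	if cpuQuota != 0 {
		if spec.Linux.Resources.CPU == nil {
			spec.Linux.Resources.CPU = &runtimespec.LinuxCPU{}
		}
		spec.Linux.Resources.CPU.Quota = &cpuQuota
	}
	if memLimit != 0 {
		if spec.Linux.Resources.Memory == nil {
			spec.Linux.Resources.Memory = &runtimespec.LinuxMemory{}
		}
		spec.Linux.Resources.Memory.Limit = &memLimit
	}
}

func main() {
	spec := &runtimespec.Spec{}
	applyResources(spec, 200000, 256<<20)
	fmt.Println(*spec.Linux.Resources.CPU.Quota, *spec.Linux.Resources.Memory.Limit)
}
```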
19	vendor/github.com/containerd/cri/pkg/server/events.go generated vendored
@@ -213,7 +213,7 @@ func (em *eventMonitor) handleEvent(any interface{}) error {
 	} else if err != store.ErrNotExist {
 		return errors.Wrap(err, "can't find container for TaskExit event")
 	}
-	// Use GetAll to include sandbox in unknown state.
+	// Use GetAll to include sandbox in init state.
 	sb, err := em.c.sandboxStore.GetAll(e.ID)
 	if err == nil {
 		if err := handleSandboxExit(ctx, e, sb); err != nil {
@@ -260,7 +260,16 @@ func handleContainerExit(ctx context.Context, e *eventtypes.TaskExit, cntr conta
 	// Attach container IO so that `Delete` could cleanup the stream properly.
 	task, err := cntr.Container.Task(ctx,
 		func(*containerdio.FIFOSet) (containerdio.IO, error) {
+			// We can't directly return cntr.IO here, because
+			// even if cntr.IO is nil, the cio.IO interface
+			// is not.
+			// See https://tour.golang.org/methods/12:
+			// Note that an interface value that holds a nil
+			// concrete value is itself non-nil.
+			if cntr.IO != nil {
 			return cntr.IO, nil
+			}
+			return nil, nil
 		},
 	)
 	if err != nil {
@@ -313,13 +322,13 @@ func handleSandboxExit(ctx context.Context, e *eventtypes.TaskExit, sb sandboxst
 		}
 	}
 	err = sb.Status.Update(func(status sandboxstore.Status) (sandboxstore.Status, error) {
-		// NOTE(random-liu): We SHOULD NOT change UNKNOWN state here.
-		// If sandbox state is UNKNOWN when event monitor receives an TaskExit event,
+		// NOTE(random-liu): We SHOULD NOT change INIT state here.
+		// If sandbox state is INIT when event monitor receives an TaskExit event,
 		// it means that sandbox start has failed. In that case, `RunPodSandbox` will
 		// cleanup everything immediately.
-		// Once sandbox state goes out of UNKNOWN, it becomes visable to the user, which
+		// Once sandbox state goes out of INIT, it becomes visable to the user, which
 		// is not what we want.
-		if status.State != sandboxstore.StateUnknown {
+		if status.State != sandboxstore.StateInit {
 			status.State = sandboxstore.StateNotReady
 		}
 		status.Pid = 0
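The new comment in handleContainerExit points at a classic Go pitfall: an interface value that holds a nil concrete pointer is itself non-nil, so returning cntr.IO unconditionally would hand containerd a non-nil cio.IO that blows up on use. A runnable illustration of exactly that behaviour:

```go
package main

import "fmt"

type writer interface {
	Write(p []byte) (int, error)
}

type fileWriter struct{}

func (*fileWriter) Write(p []byte) (int, error) { return len(p), nil }

func main() {
	var fw *fileWriter // nil concrete pointer
	var w writer = fw  // interface now holds (type=*fileWriter, value=nil)

	fmt.Println(fw == nil) // true
	fmt.Println(w == nil)  // false: an interface holding a nil pointer is non-nil
}
```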
174	vendor/github.com/containerd/cri/pkg/server/helpers.go generated vendored
@@ -18,24 +18,22 @@ package server
 
 import (
 	"fmt"
-	"io/ioutil"
-	"os"
 	"path"
 	"path/filepath"
 	"regexp"
 	"strconv"
 	"strings"
+	"time"
 
 	"github.com/BurntSushi/toml"
+	"github.com/containerd/containerd"
 	"github.com/containerd/containerd/containers"
+	"github.com/containerd/containerd/errdefs"
 	"github.com/containerd/containerd/runtime/linux/runctypes"
 	runcoptions "github.com/containerd/containerd/runtime/v2/runc/options"
 	"github.com/containerd/typeurl"
 	"github.com/docker/distribution/reference"
 	imagedigest "github.com/opencontainers/go-digest"
-	runtimespec "github.com/opencontainers/runtime-spec/specs-go"
-	"github.com/opencontainers/runtime-tools/generate"
-	"github.com/opencontainers/selinux/go-selinux"
 	"github.com/opencontainers/selinux/go-selinux/label"
 	"github.com/pkg/errors"
 	"golang.org/x/net/context"
@@ -44,8 +42,9 @@ import (
 	runtimeoptions "github.com/containerd/cri/pkg/api/runtimeoptions/v1"
 	criconfig "github.com/containerd/cri/pkg/config"
 	"github.com/containerd/cri/pkg/store"
+	containerstore "github.com/containerd/cri/pkg/store/container"
 	imagestore "github.com/containerd/cri/pkg/store/image"
-	"github.com/containerd/cri/pkg/util"
+	sandboxstore "github.com/containerd/cri/pkg/store/sandbox"
 )
 
 const (
@@ -65,8 +64,6 @@ const (
 const (
 	// defaultSandboxOOMAdj is default omm adj for sandbox container. (kubernetes#47938).
 	defaultSandboxOOMAdj = -998
-	// defaultSandboxCPUshares is default cpu shares for sandbox container.
-	defaultSandboxCPUshares = 2
 	// defaultShmSize is the default size of the sandbox shm.
 	defaultShmSize = int64(1024 * 1024 * 64)
 	// relativeRootfsPath is the rootfs path relative to bundle path.
@@ -82,18 +79,12 @@ const (
 	maxDNSSearches = 6
 	// Delimiter used to construct container/sandbox names.
 	nameDelimiter = "_"
-	// netNSFormat is the format of network namespace of a process.
-	netNSFormat = "/proc/%v/ns/net"
-	// ipcNSFormat is the format of ipc namespace of a process.
-	ipcNSFormat = "/proc/%v/ns/ipc"
-	// utsNSFormat is the format of uts namespace of a process.
-	utsNSFormat = "/proc/%v/ns/uts"
-	// pidNSFormat is the format of pid namespace of a process.
-	pidNSFormat = "/proc/%v/ns/pid"
 	// devShm is the default path of /dev/shm.
 	devShm = "/dev/shm"
 	// etcHosts is the default path of /etc/hosts file.
 	etcHosts = "/etc/hosts"
+	// etcHostname is the default path of /etc/hostname file.
+	etcHostname = "/etc/hostname"
 	// resolvConfPath is the abs path of resolv.conf on host or container.
 	resolvConfPath = "/etc/resolv.conf"
 	// hostnameEnv is the key for HOSTNAME env.
@@ -194,6 +185,11 @@ func (c *criService) getVolatileContainerRootDir(id string) string {
 	return filepath.Join(c.config.StateDir, containersDir, id)
 }
 
+// getSandboxHostname returns the hostname file path inside the sandbox root directory.
+func (c *criService) getSandboxHostname(id string) string {
+	return filepath.Join(c.getSandboxRootDir(id), "hostname")
+}
+
 // getSandboxHosts returns the hosts file path inside the sandbox root directory.
 func (c *criService) getSandboxHosts(id string) string {
 	return filepath.Join(c.getSandboxRootDir(id), "hosts")
@@ -209,26 +205,6 @@ func (c *criService) getSandboxDevShm(id string) string {
 	return filepath.Join(c.getVolatileSandboxRootDir(id), "shm")
 }
 
-// getNetworkNamespace returns the network namespace of a process.
-func getNetworkNamespace(pid uint32) string {
-	return fmt.Sprintf(netNSFormat, pid)
-}
-
-// getIPCNamespace returns the ipc namespace of a process.
-func getIPCNamespace(pid uint32) string {
-	return fmt.Sprintf(ipcNSFormat, pid)
-}
-
-// getUTSNamespace returns the uts namespace of a process.
-func getUTSNamespace(pid uint32) string {
-	return fmt.Sprintf(utsNSFormat, pid)
-}
-
-// getPIDNamespace returns the pid namespace of a process.
-func getPIDNamespace(pid uint32) string {
-	return fmt.Sprintf(pidNSFormat, pid)
-}
-
 // criContainerStateToString formats CRI container state to string.
 func criContainerStateToString(state runtime.ContainerState) string {
 	return runtime.ContainerState_name[int32(state)]
@@ -259,7 +235,7 @@ func (c *criService) localResolve(refOrID string) (imagestore.Image, error) {
 	return func(ref string) string {
 		// ref is not image id, try to resolve it locally.
 		// TODO(random-liu): Handle this error better for debugging.
-		normalized, err := util.NormalizeImageRef(ref)
+		normalized, err := reference.ParseDockerRef(ref)
 		if err != nil {
 			return ""
 		}
@@ -345,7 +321,12 @@ func initSelinuxOpts(selinuxOpt *runtime.SELinuxOption) (string, string, error)
 		selinuxOpt.GetRole(),
 		selinuxOpt.GetType(),
 		selinuxOpt.GetLevel())
-	return label.InitLabels(selinux.DupSecOpt(labelOpts))
+
+	options, err := label.DupSecOpt(labelOpts)
+	if err != nil {
+		return "", "", err
+	}
+	return label.InitLabels(options)
 }
 
 func checkSelinuxLevel(level string) (bool, error) {
@@ -363,7 +344,7 @@ func checkSelinuxLevel(level string) (bool, error) {
 // isInCRIMounts checks whether a destination is in CRI mount list.
 func isInCRIMounts(dst string, mounts []*runtime.Mount) bool {
 	for _, m := range mounts {
-		if m.ContainerPath == dst {
+		if filepath.Clean(m.ContainerPath) == filepath.Clean(dst) {
 			return true
 		}
 	}
@@ -386,13 +367,6 @@ func buildLabels(configLabels map[string]string, containerType string) map[strin
 	return labels
 }
 
-// newSpecGenerator creates a new spec generator for the runtime spec.
-func newSpecGenerator(spec *runtimespec.Spec) generate.Generator {
-	g := generate.NewFromSpec(spec)
-	g.HostSpecific = true
-	return g
-}
-
 func getPodCNILabels(id string, config *runtime.PodSandboxConfig) map[string]string {
 	return map[string]string{
 		"K8S_POD_NAMESPACE": config.GetMetadata().GetNamespace(),
@@ -412,33 +386,6 @@ func toRuntimeAuthConfig(a criconfig.AuthConfig) *runtime.AuthConfig {
 	}
 }
 
-// mounts defines how to sort runtime.Mount.
-// This is the same with the Docker implementation:
-// https://github.com/moby/moby/blob/17.05.x/daemon/volumes.go#L26
-type orderedMounts []*runtime.Mount
-
-// Len returns the number of mounts. Used in sorting.
-func (m orderedMounts) Len() int {
-	return len(m)
-}
-
-// Less returns true if the number of parts (a/b/c would be 3 parts) in the
-// mount indexed by parameter 1 is less than that of the mount indexed by
-// parameter 2. Used in sorting.
-func (m orderedMounts) Less(i, j int) bool {
-	return m.parts(i) < m.parts(j)
-}
-
-// Swap swaps two items in an array of mounts. Used in sorting
-func (m orderedMounts) Swap(i, j int) {
-	m[i], m[j] = m[j], m[i]
-}
-
-// parts returns the number of parts in the destination of a mount. Used in sorting.
-func (m orderedMounts) parts(i int) int {
-	return strings.Count(filepath.Clean(m[i].ContainerPath), string(os.PathSeparator))
-}
-
 // parseImageReferences parses a list of arbitrary image references and returns
 // the repotags and repodigests
 func parseImageReferences(refs []string) ([]string, []string) {
@@ -501,26 +448,71 @@ func getRuntimeOptions(c containers.Container) (interface{}, error) {
 	return opts, nil
 }
 
-func getCurrentOOMScoreAdj() (int, error) {
-	b, err := ioutil.ReadFile("/proc/self/oom_score_adj")
-	if err != nil {
-		return 0, errors.Wrap(err, "could not get the daemon oom_score_adj")
-	}
-	s := strings.TrimSpace(string(b))
-	i, err := strconv.Atoi(s)
-	if err != nil {
-		return 0, errors.Wrap(err, "could not get the daemon oom_score_adj")
-	}
-	return i, nil
-}
-
-func restrictOOMScoreAdj(preferredOOMScoreAdj int) (int, error) {
-	currentOOMScoreAdj, err := getCurrentOOMScoreAdj()
-	if err != nil {
-		return preferredOOMScoreAdj, err
-	}
-	if preferredOOMScoreAdj < currentOOMScoreAdj {
-		return currentOOMScoreAdj, nil
-	}
-	return preferredOOMScoreAdj, nil
-}
+const (
+	// unknownExitCode is the exit code when exit reason is unknown.
+	unknownExitCode = 255
+	// unknownExitReason is the exit reason when exit reason is unknown.
+	unknownExitReason = "Unknown"
+)
+
+// unknownContainerStatus returns the default container status when its status is unknown.
+func unknownContainerStatus() containerstore.Status {
+	return containerstore.Status{
+		CreatedAt:  0,
+		StartedAt:  0,
+		FinishedAt: 0,
+		ExitCode:   unknownExitCode,
+		Reason:     unknownExitReason,
+	}
+}
+
+// unknownSandboxStatus returns the default sandbox status when its status is unknown.
+func unknownSandboxStatus() sandboxstore.Status {
+	return sandboxstore.Status{
+		State: sandboxstore.StateUnknown,
+	}
+}
+
+// unknownExitStatus generates containerd.Status for container exited with unknown exit code.
+func unknownExitStatus() containerd.Status {
+	return containerd.Status{
+		Status:     containerd.Stopped,
+		ExitStatus: unknownExitCode,
+		ExitTime:   time.Now(),
+	}
+}
+
+// getTaskStatus returns status for a given task. It returns unknown exit status if
+// the task is nil or not found.
+func getTaskStatus(ctx context.Context, task containerd.Task) (containerd.Status, error) {
+	if task == nil {
+		return unknownExitStatus(), nil
+	}
+	status, err := task.Status(ctx)
+	if err != nil {
+		if !errdefs.IsNotFound(err) {
+			return containerd.Status{}, err
+		}
+		return unknownExitStatus(), nil
+	}
+	return status, nil
+}
+
+// getPassthroughAnnotations filters requested pod annotations by comparing
+// against permitted annotations for the given runtime.
+func getPassthroughAnnotations(podAnnotations map[string]string,
+	runtimePodAnnotations []string) (passthroughAnnotations map[string]string) {
+	passthroughAnnotations = make(map[string]string)
+
+	for podAnnotationKey, podAnnotationValue := range podAnnotations {
+		for _, pattern := range runtimePodAnnotations {
+			// Use path.Match instead of filepath.Match here.
+			// filepath.Match treated `\\` as path separator
+			// on windows, which is not what we want.
+			if ok, _ := path.Match(pattern, podAnnotationKey); ok {
+				passthroughAnnotations[podAnnotationKey] = podAnnotationValue
+			}
+		}
+	}
+	return passthroughAnnotations
+}
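The new getPassthroughAnnotations filters pod annotations against per-runtime glob patterns with path.Match. A small standalone sketch of the same filtering; the annotation keys and the allowed pattern are illustrative only:

```go
package main

import (
	"fmt"
	"path"
)

func main() {
	podAnnotations := map[string]string{
		"io.kubernetes.cri.untrusted-workload": "true",
		"example.com/team":                     "storage",
	}
	// Patterns a runtime handler might be configured to pass through.
	allowed := []string{"io.kubernetes.cri.*"}

	passthrough := map[string]string{}
	for key, value := range podAnnotations {
		for _, pattern := range allowed {
			// path.Match (rather than filepath.Match) so `\` is never treated
			// as a path separator on Windows.
			if ok, _ := path.Match(pattern, key); ok {
				passthrough[key] = value
			}
		}
	}
	fmt.Println(passthrough) // map[io.kubernetes.cri.untrusted-workload:true]
}
```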
5	vendor/github.com/containerd/cri/pkg/server/image_pull.go generated vendored
@@ -28,13 +28,12 @@ import (
 	"github.com/containerd/containerd/reference"
 	"github.com/containerd/containerd/remotes"
 	"github.com/containerd/containerd/remotes/docker"
+	distribution "github.com/docker/distribution/reference"
 	imagespec "github.com/opencontainers/image-spec/specs-go/v1"
 	"github.com/pkg/errors"
 	"github.com/sirupsen/logrus"
 	"golang.org/x/net/context"
 	runtime "k8s.io/kubernetes/pkg/kubelet/apis/cri/runtime/v1alpha2"
 
-	"github.com/containerd/cri/pkg/util"
 )
 
 // For image management:
@@ -81,7 +80,7 @@ import (
 // PullImage pulls an image with authentication config.
 func (c *criService) PullImage(ctx context.Context, r *runtime.PullImageRequest) (*runtime.PullImageResponse, error) {
 	imageRef := r.GetImage().GetImage()
-	namedRef, err := util.NormalizeImageRef(imageRef)
+	namedRef, err := distribution.ParseDockerRef(imageRef)
 	if err != nil {
 		return nil, errors.Wrapf(err, "failed to parse image reference %q", imageRef)
 	}
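Both helpers.go and image_pull.go now normalize image references with ParseDockerRef from github.com/docker/distribution/reference instead of the plugin's own util.NormalizeImageRef. A minimal sketch; the printed form is what the package typically produces for a bare reference:

```go
package main

import (
	"fmt"

	distribution "github.com/docker/distribution/reference"
)

func main() {
	// ParseDockerRef expands the default registry/repository and adds the
	// default tag when none is given.
	named, err := distribution.ParseDockerRef("ubuntu")
	if err != nil {
		panic(err)
	}
	fmt.Println(named.String()) // docker.io/library/ubuntu:latest
}
```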
6	vendor/github.com/containerd/cri/pkg/server/instrumented_service.go generated vendored
@@ -52,7 +52,7 @@ func (in *instrumentedService) RunPodSandbox(ctx context.Context, r *runtime.Run
 	if err := in.checkInitialized(); err != nil {
 		return nil, err
 	}
-	logrus.Infof("RunPodSandbox with config %+v", r.GetConfig())
+	logrus.Infof("RunPodsandbox for %+v", r.GetConfig().GetMetadata())
 	defer func() {
 		if err != nil {
 			logrus.WithError(err).Errorf("RunPodSandbox for %+v failed, error", r.GetConfig().GetMetadata())
@@ -142,8 +142,8 @@ func (in *instrumentedService) CreateContainer(ctx context.Context, r *runtime.C
 	if err := in.checkInitialized(); err != nil {
 		return nil, err
 	}
-	logrus.Infof("CreateContainer within sandbox %q with container config %+v and sandbox config %+v",
-		r.GetPodSandboxId(), r.GetConfig(), r.GetSandboxConfig())
+	logrus.Infof("CreateContainer within sandbox %q for container %+v",
+		r.GetPodSandboxId(), r.GetConfig().GetMetadata())
 	defer func() {
 		if err != nil {
 			logrus.WithError(err).Errorf("CreateContainer within sandbox %q for %+v failed",
88	vendor/github.com/containerd/cri/pkg/server/restart.go generated vendored
@@ -179,8 +179,9 @@ func (c *criService) loadContainer(ctx context.Context, cntr containerd.Containe
 		status = unknownContainerStatus()
 	}
 
-	// Load up-to-date status from containerd.
 	var containerIO *cio.ContainerIO
+	err = func() error {
+		// Load up-to-date status from containerd.
 	t, err := cntr.Task(ctx, func(fifos *containerdio.FIFOSet) (_ containerdio.IO, err error) {
 		stdoutWC, stderrWC, err := c.createContainerLoggers(meta.LogPath, meta.Config.GetTty())
 		if err != nil {
@@ -207,7 +208,7 @@ func (c *criService) loadContainer(ctx context.Context, cntr containerd.Containe
 		return containerIO, nil
 	})
 	if err != nil && !errdefs.IsNotFound(err) {
-		return container, errors.Wrap(err, "failed to load task")
+		return errors.Wrap(err, "failed to load task")
 	}
 	var s containerd.Status
 	var notFound bool
@@ -220,7 +221,7 @@ func (c *criService) loadContainer(ctx context.Context, cntr containerd.Containe
 		if err != nil {
 			// It's still possible that task is deleted during this window.
 			if !errdefs.IsNotFound(err) {
-				return container, errors.Wrap(err, "failed to get task status")
+				return errors.Wrap(err, "failed to get task status")
 			}
 			notFound = true
 		}
@@ -237,7 +238,7 @@ func (c *criService) loadContainer(ctx context.Context, cntr containerd.Containe
 				cio.WithNewFIFOs(volatileContainerDir, meta.Config.GetTty(), meta.Config.GetStdin()),
 			)
 			if err != nil {
-				return container, errors.Wrap(err, "failed to create container io")
+				return errors.Wrap(err, "failed to create container io")
 			}
 		case runtime.ContainerState_CONTAINER_RUNNING:
 			// Container was in running state, but its task has been deleted,
@@ -256,17 +257,17 @@ func (c *criService) loadContainer(ctx context.Context, cntr containerd.Containe
 			// gets restarted during container start.
 			// Container must be in `CREATED` state.
 			if _, err := t.Delete(ctx, containerd.WithProcessKill); err != nil && !errdefs.IsNotFound(err) {
-				return container, errors.Wrap(err, "failed to delete task")
+				return errors.Wrap(err, "failed to delete task")
 			}
 			if status.State() != runtime.ContainerState_CONTAINER_CREATED {
-				return container, errors.Errorf("unexpected container state for created task: %q", status.State())
+				return errors.Errorf("unexpected container state for created task: %q", status.State())
 			}
 		case containerd.Running:
 			// Task is running. Container must be in `RUNNING` state, based on our assuption that
 			// "task should not be started when containerd is down".
 			switch status.State() {
 			case runtime.ContainerState_CONTAINER_EXITED:
-				return container, errors.Errorf("unexpected container state for running task: %q", status.State())
+				return errors.Errorf("unexpected container state for running task: %q", status.State())
 			case runtime.ContainerState_CONTAINER_RUNNING:
 			default:
 				// This may happen if containerd gets restarted after task is started, but
@@ -277,42 +278,31 @@ func (c *criService) loadContainer(ctx context.Context, cntr containerd.Containe
 		case containerd.Stopped:
 			// Task is stopped. Updata status and delete the task.
 			if _, err := t.Delete(ctx, containerd.WithProcessKill); err != nil && !errdefs.IsNotFound(err) {
-				return container, errors.Wrap(err, "failed to delete task")
+				return errors.Wrap(err, "failed to delete task")
 			}
 			status.FinishedAt = s.ExitTime.UnixNano()
 			status.ExitCode = int32(s.ExitStatus)
 		default:
-			return container, errors.Errorf("unexpected task status %q", s.Status)
+			return errors.Errorf("unexpected task status %q", s.Status)
 		}
 	}
+		return nil
+	}()
+	if err != nil {
+		logrus.WithError(err).Errorf("Failed to load container status for %q", id)
+		status = unknownContainerStatus()
+	}
 	opts := []containerstore.Opts{
 		containerstore.WithStatus(status, containerDir),
 		containerstore.WithContainer(cntr),
 	}
+	// containerIO could be nil for container in unknown state.
 	if containerIO != nil {
 		opts = append(opts, containerstore.WithContainerIO(containerIO))
 	}
 	return containerstore.NewContainer(*meta, opts...)
 }
 
-const (
-	// unknownExitCode is the exit code when exit reason is unknown.
-	unknownExitCode = 255
-	// unknownExitReason is the exit reason when exit reason is unknown.
-	unknownExitReason = "Unknown"
-)
-
-// unknownContainerStatus returns the default container status when its status is unknown.
-func unknownContainerStatus() containerstore.Status {
-	return containerstore.Status{
-		CreatedAt:  0,
-		StartedAt:  0,
-		FinishedAt: 0,
-		ExitCode:   unknownExitCode,
-		Reason:     unknownExitReason,
-	}
-}
-
 // loadSandbox loads sandbox from containerd.
 func loadSandbox(ctx context.Context, cntr containerd.Container) (sandboxstore.Sandbox, error) {
 	ctx, cancel := context.WithTimeout(ctx, loadContainerTimeout)
@@ -333,61 +323,59 @@ func loadSandbox(ctx context.Context, cntr containerd.Container) (sandboxstore.S
 	}
 	meta := data.(*sandboxstore.Metadata)
 
+	s, err := func() (sandboxstore.Status, error) {
+		status := unknownSandboxStatus()
 	// Load sandbox created timestamp.
 	info, err := cntr.Info(ctx)
 	if err != nil {
-		return sandbox, errors.Wrap(err, "failed to get sandbox container info")
+		return status, errors.Wrap(err, "failed to get sandbox container info")
 	}
-	createdAt := info.CreatedAt
+	status.CreatedAt = info.CreatedAt
 
-	// Load sandbox status.
+	// Load sandbox state.
 	t, err := cntr.Task(ctx, nil)
 	if err != nil && !errdefs.IsNotFound(err) {
-		return sandbox, errors.Wrap(err, "failed to load task")
+		return status, errors.Wrap(err, "failed to load task")
 	}
-	var s containerd.Status
+	var taskStatus containerd.Status
 	var notFound bool
 	if errdefs.IsNotFound(err) {
 		// Task is not found.
 		notFound = true
 	} else {
 		// Task is found. Get task status.
-		s, err = t.Status(ctx)
+		taskStatus, err = t.Status(ctx)
 		if err != nil {
 			// It's still possible that task is deleted during this window.
 			if !errdefs.IsNotFound(err) {
-				return sandbox, errors.Wrap(err, "failed to get task status")
+				return status, errors.Wrap(err, "failed to get task status")
 			}
 			notFound = true
 		}
 	}
-	var state sandboxstore.State
-	var pid uint32
 	if notFound {
 		// Task does not exist, set sandbox state as NOTREADY.
-		state = sandboxstore.StateNotReady
+		status.State = sandboxstore.StateNotReady
 	} else {
-		if s.Status == containerd.Running {
+		if taskStatus.Status == containerd.Running {
 			// Task is running, set sandbox state as READY.
-			state = sandboxstore.StateReady
-			pid = t.Pid()
+			status.State = sandboxstore.StateReady
+			status.Pid = t.Pid()
 		} else {
 			// Task is not running. Delete the task and set sandbox state as NOTREADY.
 			if _, err := t.Delete(ctx, containerd.WithProcessKill); err != nil && !errdefs.IsNotFound(err) {
-				return sandbox, errors.Wrap(err, "failed to delete task")
+				return status, errors.Wrap(err, "failed to delete task")
 			}
-			state = sandboxstore.StateNotReady
+			status.State = sandboxstore.StateNotReady
 		}
 	}
+		return status, nil
+	}()
+	if err != nil {
+		logrus.WithError(err).Errorf("Failed to load sandbox status for %q", cntr.ID())
+	}
 
-	sandbox = sandboxstore.NewSandbox(
-		*meta,
-		sandboxstore.Status{
-			Pid:       pid,
-			CreatedAt: createdAt,
-			State:     state,
-		},
-	)
+	sandbox = sandboxstore.NewSandbox(*meta, s)
 	sandbox.Container = cntr
 
 	// Load network namespace.
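loadContainer and loadSandbox now run all fallible status loading inside an anonymous function and degrade to an "unknown" status when it fails, instead of aborting the whole restore. A generic sketch of that pattern with illustrative names (not the vendored ones):

```go
package main

import (
	"errors"
	"fmt"
	"log"
)

// loadState scopes the error handling to a closure: any failure inside it is
// logged and replaced by a safe "unknown" default, so one broken container
// cannot prevent the rest of the restore from proceeding.
func loadState(fetch func() (string, error)) string {
	state, err := func() (string, error) {
		s, err := fetch()
		if err != nil {
			return "", err
		}
		return s, nil
	}()
	if err != nil {
		log.Printf("failed to load state, falling back to unknown: %v", err)
		return "unknown"
	}
	return state
}

func main() {
	fmt.Println(loadState(func() (string, error) { return "running", nil }))
	fmt.Println(loadState(func() (string, error) { return "", errors.New("task not found") }))
}
```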
1	vendor/github.com/containerd/cri/pkg/server/sandbox_list.go generated vendored
@@ -53,6 +53,7 @@ func toCRISandbox(meta sandboxstore.Metadata, status sandboxstore.Status) *runti
 		CreatedAt:   status.CreatedAt.UnixNano(),
 		Labels:      meta.Config.GetLabels(),
 		Annotations: meta.Config.GetAnnotations(),
+		RuntimeHandler: meta.RuntimeHandler,
 	}
 }
 
5	vendor/github.com/containerd/cri/pkg/server/sandbox_remove.go generated vendored
@@ -46,8 +46,9 @@ func (c *criService) RemovePodSandbox(ctx context.Context, r *runtime.RemovePodS
 	// Use the full sandbox id.
 	id := sandbox.ID
 
-	// Return error if sandbox container is still running.
-	if sandbox.Status.Get().State == sandboxstore.StateReady {
+	// Return error if sandbox container is still running or unknown.
+	state := sandbox.Status.Get().State
+	if state == sandboxstore.StateReady || state == sandboxstore.StateUnknown {
 		return nil, errors.Errorf("sandbox container %q is not fully stopped", id)
 	}
 
140
vendor/github.com/containerd/cri/pkg/server/sandbox_run.go
generated
vendored
140
vendor/github.com/containerd/cri/pkg/server/sandbox_run.go
generated
vendored
@ -28,6 +28,7 @@ import (
|
|||||||
"github.com/containerd/containerd/oci"
|
"github.com/containerd/containerd/oci"
|
||||||
cni "github.com/containerd/go-cni"
|
cni "github.com/containerd/go-cni"
|
||||||
"github.com/containerd/typeurl"
|
"github.com/containerd/typeurl"
|
||||||
|
"github.com/davecgh/go-spew/spew"
|
||||||
imagespec "github.com/opencontainers/image-spec/specs-go/v1"
|
imagespec "github.com/opencontainers/image-spec/specs-go/v1"
|
||||||
runtimespec "github.com/opencontainers/runtime-spec/specs-go"
|
runtimespec "github.com/opencontainers/runtime-spec/specs-go"
|
||||||
"github.com/pkg/errors"
|
"github.com/pkg/errors"
|
||||||
@ -55,6 +56,7 @@ func init() {
|
|||||||
// the sandbox is in ready state.
|
// the sandbox is in ready state.
|
||||||
func (c *criService) RunPodSandbox(ctx context.Context, r *runtime.RunPodSandboxRequest) (_ *runtime.RunPodSandboxResponse, retErr error) {
|
func (c *criService) RunPodSandbox(ctx context.Context, r *runtime.RunPodSandboxRequest) (_ *runtime.RunPodSandboxResponse, retErr error) {
|
||||||
config := r.GetConfig()
|
config := r.GetConfig()
|
||||||
|
logrus.Debugf("Sandbox config %+v", config)
|
||||||
|
|
||||||
// Generate unique id and name for the sandbox and reserve the name.
|
// Generate unique id and name for the sandbox and reserve the name.
|
||||||
id := util.GenerateID()
|
id := util.GenerateID()
|
||||||
@ -85,7 +87,7 @@ func (c *criService) RunPodSandbox(ctx context.Context, r *runtime.RunPodSandbox
|
|||||||
RuntimeHandler: r.GetRuntimeHandler(),
|
RuntimeHandler: r.GetRuntimeHandler(),
|
||||||
},
|
},
|
||||||
sandboxstore.Status{
|
sandboxstore.Status{
|
||||||
State: sandboxstore.StateUnknown,
|
State: sandboxstore.StateInit,
|
||||||
},
|
},
|
||||||
)
|
)
|
||||||
|
|
||||||
@ -145,11 +147,11 @@ func (c *criService) RunPodSandbox(ctx context.Context, r *runtime.RunPodSandbox
|
|||||||
}
|
}
|
||||||
|
|
||||||
// Create sandbox container.
|
// Create sandbox container.
|
||||||
spec, err := c.generateSandboxContainerSpec(id, config, &image.ImageSpec.Config, sandbox.NetNSPath)
|
spec, err := c.generateSandboxContainerSpec(id, config, &image.ImageSpec.Config, sandbox.NetNSPath, ociRuntime.PodAnnotations)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, errors.Wrap(err, "failed to generate sandbox container spec")
|
return nil, errors.Wrap(err, "failed to generate sandbox container spec")
|
||||||
}
|
}
|
||||||
logrus.Debugf("Sandbox container spec: %+v", spec)
|
logrus.Debugf("Sandbox container %q spec: %#+v", id, spew.NewFormatter(spec))
|
||||||
|
|
||||||
var specOpts []oci.SpecOpts
|
var specOpts []oci.SpecOpts
|
||||||
userstr, err := generateUserString(
|
userstr, err := generateUserString(
|
||||||
@ -233,7 +235,7 @@ func (c *criService) RunPodSandbox(ctx context.Context, r *runtime.RunPodSandbox
|
|||||||
}
|
}
|
||||||
}()
|
}()
|
||||||
|
|
||||||
// Setup sandbox /dev/shm, /etc/hosts and /etc/resolv.conf.
|
// Setup sandbox /dev/shm, /etc/hosts, /etc/resolv.conf and /etc/hostname.
|
||||||
if err = c.setupSandboxFiles(id, config); err != nil {
|
if err = c.setupSandboxFiles(id, config); err != nil {
|
||||||
return nil, errors.Wrapf(err, "failed to setup sandbox files")
|
return nil, errors.Wrapf(err, "failed to setup sandbox files")
|
||||||
}
|
}
|
||||||
@ -258,7 +260,7 @@ func (c *criService) RunPodSandbox(ctx context.Context, r *runtime.RunPodSandbox
|
|||||||
return nil, errors.Wrap(err, "failed to update sandbox created timestamp")
|
return nil, errors.Wrap(err, "failed to update sandbox created timestamp")
|
||||||
}
|
}
|
||||||
|
|
||||||
// Add sandbox into sandbox store in UNKNOWN state.
|
// Add sandbox into sandbox store in INIT state.
|
||||||
sandbox.Container = container
|
sandbox.Container = container
|
||||||
if err := c.sandboxStore.Add(sandbox); err != nil {
|
if err := c.sandboxStore.Add(sandbox); err != nil {
|
||||||
return nil, errors.Wrapf(err, "failed to add sandbox %+v into store", sandbox)
|
return nil, errors.Wrapf(err, "failed to add sandbox %+v into store", sandbox)
|
||||||
@ -269,7 +271,7 @@ func (c *criService) RunPodSandbox(ctx context.Context, r *runtime.RunPodSandbox
|
|||||||
c.sandboxStore.Delete(id)
|
c.sandboxStore.Delete(id)
|
||||||
}
|
}
|
||||||
}()
|
}()
|
||||||
// NOTE(random-liu): Sandbox state only stay in UNKNOWN state after this point
|
// NOTE(random-liu): Sandbox state only stay in INIT state after this point
|
||||||
// and before the end of this function.
|
// and before the end of this function.
|
||||||
// * If `Update` succeeds, sandbox state will become READY in one transaction.
|
// * If `Update` succeeds, sandbox state will become READY in one transaction.
|
||||||
// * If `Update` fails, sandbox will be removed from the store in the defer above.
|
// * If `Update` fails, sandbox will be removed from the store in the defer above.
|
||||||
@ -279,8 +281,8 @@ func (c *criService) RunPodSandbox(ctx context.Context, r *runtime.RunPodSandbox
|
|||||||
// * If the task is running, sandbox state will be READY,
|
// * If the task is running, sandbox state will be READY,
|
||||||
// * Or else, sandbox state will be NOTREADY.
|
// * Or else, sandbox state will be NOTREADY.
|
||||||
//
|
//
|
||||||
// In any case, sandbox will leave UNKNOWN state, so it's safe to ignore sandbox
|
// In any case, sandbox will leave INIT state, so it's safe to ignore sandbox
|
||||||
// in UNKNOWN state in other functions.
|
// in INIT state in other functions.
|
||||||
|
|
||||||
// Start sandbox container in one transaction to avoid race condition with
|
// Start sandbox container in one transaction to avoid race condition with
|
||||||
// event monitor.
|
// event monitor.
|
||||||
@ -293,8 +295,8 @@ func (c *criService) RunPodSandbox(ctx context.Context, r *runtime.RunPodSandbox
|
|||||||
// see the sandbox disappear after the defer clean up, which may confuse
|
// see the sandbox disappear after the defer clean up, which may confuse
|
||||||
// them.
|
// them.
|
||||||
//
|
//
|
||||||
// Given so, we should keep the sandbox in UNKNOWN state if `Update` fails,
|
// Given so, we should keep the sandbox in INIT state if `Update` fails,
|
||||||
// and ignore sandbox in UNKNOWN state in all the inspection functions.
|
// and ignore sandbox in INIT state in all the inspection functions.
|
||||||
|
|
||||||
// Create sandbox task in containerd.
|
// Create sandbox task in containerd.
|
||||||
log.Tracef("Create sandbox container (id=%q, name=%q).",
|
log.Tracef("Create sandbox container (id=%q, name=%q).",
|
||||||
@ -338,70 +340,65 @@ func (c *criService) RunPodSandbox(ctx context.Context, r *runtime.RunPodSandbox
|
|||||||
}
|
}
|
||||||
|
|
||||||
func (c *criService) generateSandboxContainerSpec(id string, config *runtime.PodSandboxConfig,
|
func (c *criService) generateSandboxContainerSpec(id string, config *runtime.PodSandboxConfig,
|
||||||
imageConfig *imagespec.ImageConfig, nsPath string) (*runtimespec.Spec, error) {
|
imageConfig *imagespec.ImageConfig, nsPath string, runtimePodAnnotations []string) (*runtimespec.Spec, error) {
|
||||||
// Creates a spec Generator with the default spec.
|
// Creates a spec Generator with the default spec.
|
||||||
// TODO(random-liu): [P1] Compare the default settings with docker and containerd default.
|
// TODO(random-liu): [P1] Compare the default settings with docker and containerd default.
|
||||||
spec, err := defaultRuntimeSpec(id)
|
specOpts := []oci.SpecOpts{
|
||||||
if err != nil {
|
customopts.WithoutRunMount,
|
||||||
return nil, err
|
customopts.WithoutDefaultSecuritySettings,
|
||||||
|
customopts.WithRelativeRoot(relativeRootfsPath),
|
||||||
|
oci.WithEnv(imageConfig.Env),
|
||||||
|
oci.WithRootFSReadonly(),
|
||||||
|
oci.WithHostname(config.GetHostname()),
|
||||||
}
|
}
|
||||||
g := newSpecGenerator(spec)
|
|
||||||
|
|
||||||
// Apply default config from image config.
|
|
||||||
if err := addImageEnvs(&g, imageConfig.Env); err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
if imageConfig.WorkingDir != "" {
|
if imageConfig.WorkingDir != "" {
|
||||||
g.SetProcessCwd(imageConfig.WorkingDir)
|
specOpts = append(specOpts, oci.WithProcessCwd(imageConfig.WorkingDir))
|
||||||
}
|
}
|
||||||
|
|
||||||
if len(imageConfig.Entrypoint) == 0 && len(imageConfig.Cmd) == 0 {
|
if len(imageConfig.Entrypoint) == 0 && len(imageConfig.Cmd) == 0 {
|
||||||
// Pause image must have entrypoint or cmd.
|
// Pause image must have entrypoint or cmd.
|
||||||
return nil, errors.Errorf("invalid empty entrypoint and cmd in image config %+v", imageConfig)
|
return nil, errors.Errorf("invalid empty entrypoint and cmd in image config %+v", imageConfig)
|
||||||
}
|
}
|
||||||
// Set process commands.
|
specOpts = append(specOpts, oci.WithProcessArgs(append(imageConfig.Entrypoint, imageConfig.Cmd...)...))
|
||||||
g.SetProcessArgs(append(imageConfig.Entrypoint, imageConfig.Cmd...))
|
|
||||||
|
|
||||||
// Set relative root path.
|
|
||||||
g.SetRootPath(relativeRootfsPath)
|
|
||||||
|
|
||||||
// Make root of sandbox container read-only.
|
|
||||||
g.SetRootReadonly(true)
|
|
||||||
|
|
||||||
// Set hostname.
|
|
||||||
g.SetHostname(config.GetHostname())
|
|
||||||
|
|
||||||
// TODO(random-liu): [P2] Consider whether to add labels and annotations to the container.
|
// TODO(random-liu): [P2] Consider whether to add labels and annotations to the container.
|
||||||
|
|
||||||
// Set cgroups parent.
|
// Set cgroups parent.
|
||||||
if c.config.DisableCgroup {
|
if c.config.DisableCgroup {
|
||||||
g.SetLinuxCgroupsPath("")
|
specOpts = append(specOpts, customopts.WithDisabledCgroups)
|
||||||
} else {
|
} else {
|
||||||
if config.GetLinux().GetCgroupParent() != "" {
|
if config.GetLinux().GetCgroupParent() != "" {
|
||||||
cgroupsPath := getCgroupsPath(config.GetLinux().GetCgroupParent(), id,
|
cgroupsPath := getCgroupsPath(config.GetLinux().GetCgroupParent(), id,
|
||||||
c.config.SystemdCgroup)
|
c.config.SystemdCgroup)
|
||||||
g.SetLinuxCgroupsPath(cgroupsPath)
|
specOpts = append(specOpts, oci.WithCgroup(cgroupsPath))
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// When cgroup parent is not set, containerd-shim will create the container in a child cgroup
|
// When cgroup parent is not set, containerd-shim will create the container in a child cgroup
|
||||||
// of the cgroup the shim itself is in.
|
// of the cgroup the shim itself is in.
|
||||||
// TODO(random-liu): [P2] Set default cgroup path if cgroup parent is not specified.
|
// TODO(random-liu): [P2] Set default cgroup path if cgroup parent is not specified.
|
||||||
|
|
||||||
// Set namespace options.
|
// Set namespace options.
|
||||||
securityContext := config.GetLinux().GetSecurityContext()
|
var (
|
||||||
nsOptions := securityContext.GetNamespaceOptions()
|
securityContext = config.GetLinux().GetSecurityContext()
|
||||||
|
nsOptions = securityContext.GetNamespaceOptions()
|
||||||
|
)
|
||||||
if nsOptions.GetNetwork() == runtime.NamespaceMode_NODE {
|
if nsOptions.GetNetwork() == runtime.NamespaceMode_NODE {
|
||||||
g.RemoveLinuxNamespace(string(runtimespec.NetworkNamespace)) // nolint: errcheck
|
specOpts = append(specOpts, customopts.WithoutNamespace(runtimespec.NetworkNamespace))
|
||||||
|
specOpts = append(specOpts, customopts.WithoutNamespace(runtimespec.UTSNamespace))
|
||||||
} else {
|
} else {
|
||||||
// TODO(Abhi): Maybe move this to containerd spec opts (WithLinuxSpaceOption)
|
// TODO(Abhi): Maybe move this to containerd spec opts (WithLinuxSpaceOption)
|
||||||
g.AddOrReplaceLinuxNamespace(string(runtimespec.NetworkNamespace), nsPath) // nolint: errcheck
|
specOpts = append(specOpts, oci.WithLinuxNamespace(
|
||||||
|
runtimespec.LinuxNamespace{
|
||||||
|
Type: runtimespec.NetworkNamespace,
|
||||||
|
Path: nsPath,
|
||||||
|
}))
|
||||||
}
|
}
|
||||||
if nsOptions.GetPid() == runtime.NamespaceMode_NODE {
|
if nsOptions.GetPid() == runtime.NamespaceMode_NODE {
|
||||||
g.RemoveLinuxNamespace(string(runtimespec.PIDNamespace)) // nolint: errcheck
|
specOpts = append(specOpts, customopts.WithoutNamespace(runtimespec.PIDNamespace))
|
||||||
}
|
}
|
||||||
if nsOptions.GetIpc() == runtime.NamespaceMode_NODE {
|
if nsOptions.GetIpc() == runtime.NamespaceMode_NODE {
|
||||||
g.RemoveLinuxNamespace(string(runtimespec.IPCNamespace)) // nolint: errcheck
|
specOpts = append(specOpts, customopts.WithoutNamespace(runtimespec.IPCNamespace))
|
||||||
}
|
}
|
||||||
|
|
||||||
// It's fine to generate the spec before the sandbox /dev/shm
|
// It's fine to generate the spec before the sandbox /dev/shm
|
||||||
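The theme of this hunk is the switch from mutating a spec generator in place to accumulating a slice of oci.SpecOpts and applying them at the end, which makes conditional settings (host network, cgroups disabled, working directory, and so on) simple appends. A rough sketch of that pattern with a cut-down option type; the real oci.SpecOpts also receives a context, client and container, and customopts is internal to the CRI plugin:

package main

import "fmt"

// Spec is a cut-down stand-in for the OCI runtime spec.
type Spec struct {
	Hostname string
	Env      []string
	Cwd      string
}

// SpecOpt is a simplified analogue of containerd's oci.SpecOpts: a function
// that mutates the spec and may fail.
type SpecOpt func(*Spec) error

func WithHostname(h string) SpecOpt { return func(s *Spec) error { s.Hostname = h; return nil } }
func WithEnv(env []string) SpecOpt  { return func(s *Spec) error { s.Env = append(s.Env, env...); return nil } }
func WithCwd(dir string) SpecOpt    { return func(s *Spec) error { s.Cwd = dir; return nil } }

// buildSpec applies the accumulated options in order, the way the new
// generateSandboxContainerSpec collects specOpts and applies them at the end.
func buildSpec(opts ...SpecOpt) (*Spec, error) {
	s := &Spec{}
	for _, o := range opts {
		if err := o(s); err != nil {
			return nil, err
		}
	}
	return s, nil
}

func main() {
	opts := []SpecOpt{WithEnv([]string{"PATH=/usr/bin"}), WithHostname("sandbox")}
	// Options are appended conditionally before applying, e.g. only set a
	// working directory when the image config provides one.
	if wd := "/work"; wd != "" {
		opts = append(opts, WithCwd(wd))
	}
	spec, err := buildSpec(opts...)
	fmt.Println(spec, err)
}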
@ -410,55 +407,68 @@ func (c *criService) generateSandboxContainerSpec(id string, config *runtime.Pod
|
|||||||
if nsOptions.GetIpc() == runtime.NamespaceMode_NODE {
|
if nsOptions.GetIpc() == runtime.NamespaceMode_NODE {
|
||||||
sandboxDevShm = devShm
|
sandboxDevShm = devShm
|
||||||
}
|
}
|
||||||
g.AddMount(runtimespec.Mount{
|
specOpts = append(specOpts, oci.WithMounts([]runtimespec.Mount{
|
||||||
|
{
|
||||||
Source: sandboxDevShm,
|
Source: sandboxDevShm,
|
||||||
Destination: devShm,
|
Destination: devShm,
|
||||||
Type: "bind",
|
Type: "bind",
|
||||||
Options: []string{"rbind", "ro"},
|
Options: []string{"rbind", "ro"},
|
||||||
})
|
},
|
||||||
|
}))
|
||||||
|
|
||||||
selinuxOpt := securityContext.GetSelinuxOptions()
|
selinuxOpt := securityContext.GetSelinuxOptions()
|
||||||
processLabel, mountLabel, err := initSelinuxOpts(selinuxOpt)
|
processLabel, mountLabel, err := initSelinuxOpts(selinuxOpt)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, errors.Wrapf(err, "failed to init selinux options %+v", securityContext.GetSelinuxOptions())
|
return nil, errors.Wrapf(err, "failed to init selinux options %+v", securityContext.GetSelinuxOptions())
|
||||||
}
|
}
|
||||||
g.SetProcessSelinuxLabel(processLabel)
|
|
||||||
g.SetLinuxMountLabel(mountLabel)
|
|
||||||
|
|
||||||
supplementalGroups := securityContext.GetSupplementalGroups()
|
supplementalGroups := securityContext.GetSupplementalGroups()
|
||||||
for _, group := range supplementalGroups {
|
specOpts = append(specOpts,
|
||||||
g.AddProcessAdditionalGid(uint32(group))
|
customopts.WithSelinuxLabels(processLabel, mountLabel),
|
||||||
}
|
customopts.WithSupplementalGroups(supplementalGroups),
|
||||||
|
)
|
||||||
|
|
||||||
// Add sysctls
|
// Add sysctls
|
||||||
sysctls := config.GetLinux().GetSysctls()
|
sysctls := config.GetLinux().GetSysctls()
|
||||||
for key, value := range sysctls {
|
specOpts = append(specOpts, customopts.WithSysctls(sysctls))
|
||||||
g.AddLinuxSysctl(key, value)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Note: LinuxSandboxSecurityContext does not currently provide an apparmor profile
|
// Note: LinuxSandboxSecurityContext does not currently provide an apparmor profile
|
||||||
|
|
||||||
if !c.config.DisableCgroup {
|
if !c.config.DisableCgroup {
|
||||||
g.SetLinuxResourcesCPUShares(uint64(defaultSandboxCPUshares))
|
specOpts = append(specOpts, customopts.WithDefaultSandboxShares)
|
||||||
}
|
}
|
||||||
adj := int(defaultSandboxOOMAdj)
|
specOpts = append(specOpts, customopts.WithPodOOMScoreAdj(int(defaultSandboxOOMAdj), c.config.RestrictOOMScoreAdj))
|
||||||
if c.config.RestrictOOMScoreAdj {
|
|
||||||
adj, err = restrictOOMScoreAdj(adj)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
}
|
|
||||||
g.SetProcessOOMScoreAdj(adj)
|
|
||||||
|
|
||||||
g.AddAnnotation(annotations.ContainerType, annotations.ContainerTypeSandbox)
|
for pKey, pValue := range getPassthroughAnnotations(config.Annotations,
|
||||||
g.AddAnnotation(annotations.SandboxID, id)
|
runtimePodAnnotations) {
|
||||||
|
specOpts = append(specOpts, customopts.WithAnnotation(pKey, pValue))
|
||||||
|
}
|
||||||
|
|
||||||
return g.Config, nil
|
specOpts = append(specOpts,
|
||||||
|
customopts.WithAnnotation(annotations.ContainerType, annotations.ContainerTypeSandbox),
|
||||||
|
customopts.WithAnnotation(annotations.SandboxID, id),
|
||||||
|
customopts.WithAnnotation(annotations.SandboxLogDir, config.GetLogDirectory()),
|
||||||
|
)
|
||||||
|
|
||||||
|
return runtimeSpec(id, specOpts...)
|
||||||
}
|
}
|
||||||
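Several of the replacements above (customopts.WithAnnotation, WithSysctls, WithPodOOMScoreAdj, the passthrough-annotation loop) are small custom spec options kept inside the CRI plugin. Purely as an illustration of the shape such an option takes, an annotation-setting option written against containerd's oci.SpecOpts signature might look like the sketch below; the vendored customopts implementations may differ in detail:

package customoptsdemo

import (
	"context"

	"github.com/containerd/containerd/containers"
	"github.com/containerd/containerd/oci"
	runtimespec "github.com/opencontainers/runtime-spec/specs-go"
)

// withAnnotation is an illustrative spec option that records a single
// annotation on the generated OCI spec, similar in spirit to the
// customopts.WithAnnotation calls in the diff above.
func withAnnotation(k, v string) oci.SpecOpts {
	return func(_ context.Context, _ oci.Client, _ *containers.Container, s *runtimespec.Spec) error {
		if s.Annotations == nil {
			s.Annotations = make(map[string]string)
		}
		s.Annotations[k] = v
		return nil
	}
}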
|
|
||||||
// setupSandboxFiles sets up necessary sandbox files including /dev/shm, /etc/hosts
|
// setupSandboxFiles sets up necessary sandbox files including /dev/shm, /etc/hosts,
|
||||||
// and /etc/resolv.conf.
|
// /etc/resolv.conf and /etc/hostname.
|
||||||
func (c *criService) setupSandboxFiles(id string, config *runtime.PodSandboxConfig) error {
|
func (c *criService) setupSandboxFiles(id string, config *runtime.PodSandboxConfig) error {
|
||||||
|
sandboxEtcHostname := c.getSandboxHostname(id)
|
||||||
|
hostname := config.GetHostname()
|
||||||
|
if hostname == "" {
|
||||||
|
var err error
|
||||||
|
hostname, err = c.os.Hostname()
|
||||||
|
if err != nil {
|
||||||
|
return errors.Wrap(err, "failed to get hostname")
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if err := c.os.WriteFile(sandboxEtcHostname, []byte(hostname+"\n"), 0644); err != nil {
|
||||||
|
return errors.Wrapf(err, "failed to write hostname to %q", sandboxEtcHostname)
|
||||||
|
}
|
||||||
|
|
||||||
// TODO(random-liu): Consider whether we should maintain /etc/hosts and /etc/resolv.conf in kubelet.
|
// TODO(random-liu): Consider whether we should maintain /etc/hosts and /etc/resolv.conf in kubelet.
|
||||||
sandboxEtcHosts := c.getSandboxHosts(id)
|
sandboxEtcHosts := c.getSandboxHosts(id)
|
||||||
if err := c.os.CopyFile(etcHosts, sandboxEtcHosts, 0644); err != nil {
|
if err := c.os.CopyFile(etcHosts, sandboxEtcHosts, 0644); err != nil {
|
||||||
|
14
vendor/github.com/containerd/cri/pkg/server/sandbox_status.go
generated
vendored
14
vendor/github.com/containerd/cri/pkg/server/sandbox_status.go
generated
vendored
@ -42,6 +42,14 @@ func (c *criService) PodSandboxStatus(ctx context.Context, r *runtime.PodSandbox
|
|||||||
return nil, errors.Wrap(err, "failed to get sandbox ip")
|
return nil, errors.Wrap(err, "failed to get sandbox ip")
|
||||||
}
|
}
|
||||||
status := toCRISandboxStatus(sandbox.Metadata, sandbox.Status.Get(), ip)
|
status := toCRISandboxStatus(sandbox.Metadata, sandbox.Status.Get(), ip)
|
||||||
|
if status.GetCreatedAt() == 0 {
|
||||||
|
// CRI doesn't allow CreatedAt == 0.
|
||||||
|
info, err := sandbox.Container.Info(ctx)
|
||||||
|
if err != nil {
|
||||||
|
return nil, errors.Wrapf(err, "failed to get CreatedAt for sandbox container in %q state", status.State)
|
||||||
|
}
|
||||||
|
status.CreatedAt = info.CreatedAt.UnixNano()
|
||||||
|
}
|
||||||
if !r.GetVerbose() {
|
if !r.GetVerbose() {
|
||||||
return &runtime.PodSandboxStatusResponse{Status: status}, nil
|
return &runtime.PodSandboxStatusResponse{Status: status}, nil
|
||||||
}
|
}
|
||||||
@ -101,6 +109,7 @@ func toCRISandboxStatus(meta sandboxstore.Metadata, status sandboxstore.Status,
|
|||||||
},
|
},
|
||||||
Labels: meta.Config.GetLabels(),
|
Labels: meta.Config.GetLabels(),
|
||||||
Annotations: meta.Config.GetAnnotations(),
|
Annotations: meta.Config.GetAnnotations(),
|
||||||
|
RuntimeHandler: meta.RuntimeHandler,
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -113,7 +122,10 @@ type SandboxInfo struct {
|
|||||||
Image string `json:"image"`
|
Image string `json:"image"`
|
||||||
SnapshotKey string `json:"snapshotKey"`
|
SnapshotKey string `json:"snapshotKey"`
|
||||||
Snapshotter string `json:"snapshotter"`
|
Snapshotter string `json:"snapshotter"`
|
||||||
RuntimeHandler string `json:"runtimeHandler"`
|
// Note: a new field `RuntimeHandler` has been added into the CRI PodSandboxStatus struct, and
|
||||||
|
// should be set. This `RuntimeHandler` field will be deprecated after containerd 1.3 (tracked
|
||||||
|
// in https://github.com/containerd/cri/issues/1064).
|
||||||
|
RuntimeHandler string `json:"runtimeHandler"` // see the Note above
|
||||||
RuntimeType string `json:"runtimeType"`
|
RuntimeType string `json:"runtimeType"`
|
||||||
RuntimeOptions interface{} `json:"runtimeOptions"`
|
RuntimeOptions interface{} `json:"runtimeOptions"`
|
||||||
Config *runtime.PodSandboxConfig `json:"config"`
|
Config *runtime.PodSandboxConfig `json:"config"`
|
||||||
|
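When PodSandboxStatus is called with Verbose set, this SandboxInfo struct is serialized as JSON into the response's Info map, which is how tools such as crictl surface the extra detail. A hedged sketch of that marshaling step, with a cut-down struct and the "info" key treated as an assumption:

package example

import "encoding/json"

// SandboxVerboseInfo is a cut-down stand-in for the SandboxInfo struct above;
// only a couple of fields are kept for illustration.
type SandboxVerboseInfo struct {
	Pid            uint32 `json:"pid"`
	RuntimeHandler string `json:"runtimeHandler"`
}

// toVerboseInfo marshals the struct into the string map returned in the
// verbose PodSandboxStatus response (the "info" key is an assumption here).
func toVerboseInfo(si SandboxVerboseInfo) (map[string]string, error) {
	b, err := json.Marshal(si)
	if err != nil {
		return nil, err
	}
	return map[string]string{"info": string(b)}, nil
}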
52
vendor/github.com/containerd/cri/pkg/server/sandbox_stop.go
generated
vendored
52
vendor/github.com/containerd/cri/pkg/server/sandbox_stop.go
generated
vendored
@ -19,6 +19,8 @@ package server
|
|||||||
import (
|
import (
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
|
"github.com/containerd/containerd"
|
||||||
|
eventtypes "github.com/containerd/containerd/api/events"
|
||||||
"github.com/containerd/containerd/errdefs"
|
"github.com/containerd/containerd/errdefs"
|
||||||
cni "github.com/containerd/go-cni"
|
cni "github.com/containerd/go-cni"
|
||||||
"github.com/pkg/errors"
|
"github.com/pkg/errors"
|
||||||
@ -60,10 +62,11 @@ func (c *criService) StopPodSandbox(ctx context.Context, r *runtime.StopPodSandb
|
|||||||
return nil, errors.Wrap(err, "failed to unmount sandbox files")
|
return nil, errors.Wrap(err, "failed to unmount sandbox files")
|
||||||
}
|
}
|
||||||
|
|
||||||
// Only stop sandbox container when it's running.
|
// Only stop sandbox container when it's running or unknown.
|
||||||
if sandbox.Status.Get().State == sandboxstore.StateReady {
|
state := sandbox.Status.Get().State
|
||||||
|
if state == sandboxstore.StateReady || state == sandboxstore.StateUnknown {
|
||||||
if err := c.stopSandboxContainer(ctx, sandbox); err != nil {
|
if err := c.stopSandboxContainer(ctx, sandbox); err != nil {
|
||||||
return nil, errors.Wrapf(err, "failed to stop sandbox container %q", id)
|
return nil, errors.Wrapf(err, "failed to stop sandbox container %q in %q state", id, state)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -95,12 +98,36 @@ func (c *criService) StopPodSandbox(ctx context.Context, r *runtime.StopPodSandb
|
|||||||
// the event monitor handles the `TaskExit` event.
|
// the event monitor handles the `TaskExit` event.
|
||||||
func (c *criService) stopSandboxContainer(ctx context.Context, sandbox sandboxstore.Sandbox) error {
|
func (c *criService) stopSandboxContainer(ctx context.Context, sandbox sandboxstore.Sandbox) error {
|
||||||
container := sandbox.Container
|
container := sandbox.Container
|
||||||
|
state := sandbox.Status.Get().State
|
||||||
task, err := container.Task(ctx, nil)
|
task, err := container.Task(ctx, nil)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
if errdefs.IsNotFound(err) {
|
if !errdefs.IsNotFound(err) {
|
||||||
|
return errors.Wrap(err, "failed to get sandbox container")
|
||||||
|
}
|
||||||
|
// Don't return for unknown state; some cleanup still needs to be done.
|
||||||
|
if state != sandboxstore.StateUnknown {
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
return errors.Wrap(err, "failed to get sandbox container")
|
// Task is an interface, explicitly set it to nil just in case.
|
||||||
|
task = nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Handle unknown state.
|
||||||
|
// The cleanup logic is the same as for a container in unknown state.
|
||||||
|
if state == sandboxstore.StateUnknown {
|
||||||
|
status, err := getTaskStatus(ctx, task)
|
||||||
|
if err != nil {
|
||||||
|
return errors.Wrapf(err, "failed to get task status for %q", sandbox.ID)
|
||||||
|
}
|
||||||
|
switch status.Status {
|
||||||
|
case containerd.Running, containerd.Created:
|
||||||
|
// The task is still running, continue stopping the task.
|
||||||
|
case containerd.Stopped:
|
||||||
|
// The task has exited, explicitly cleanup.
|
||||||
|
return cleanupUnknownSandbox(ctx, sandbox.ID, status, sandbox)
|
||||||
|
default:
|
||||||
|
return errors.Wrapf(err, "unsupported task status %q", status.Status)
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// Kill the sandbox container.
|
// Kill the sandbox container.
|
||||||
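For a sandbox in UNKNOWN state, the hunk above first asks containerd what the task is actually doing and only then decides between continuing to kill it and synthesizing an exit for cleanup. getTaskStatus is internal to the plugin; the sketch below only illustrates the general idea (a missing task is treated as already stopped) and is not its actual implementation -- the placeholder exit code is an assumption:

package example

import (
	"context"
	"time"

	"github.com/containerd/containerd"
)

// taskStatus returns a best-effort status for a possibly missing task.
// Illustrative only: the CRI plugin's getTaskStatus may behave differently.
func taskStatus(ctx context.Context, task containerd.Task) (containerd.Status, error) {
	if task == nil {
		// No task object at all: report it as stopped with a placeholder exit code.
		return containerd.Status{
			Status:     containerd.Stopped,
			ExitStatus: 255, // assumed placeholder, not necessarily the plugin's choice
			ExitTime:   time.Now(),
		}, nil
	}
	return task.Status(ctx)
}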
@ -117,7 +144,7 @@ func (c *criService) waitSandboxStop(ctx context.Context, sandbox sandboxstore.S
|
|||||||
defer timeoutTimer.Stop()
|
defer timeoutTimer.Stop()
|
||||||
select {
|
select {
|
||||||
case <-ctx.Done():
|
case <-ctx.Done():
|
||||||
return errors.Errorf("wait sandbox container %q is cancelled", sandbox.ID)
|
return errors.Wrapf(ctx.Err(), "wait sandbox container %q is cancelled", sandbox.ID)
|
||||||
case <-timeoutTimer.C:
|
case <-timeoutTimer.C:
|
||||||
return errors.Errorf("wait sandbox container %q stop timeout", sandbox.ID)
|
return errors.Errorf("wait sandbox container %q stop timeout", sandbox.ID)
|
||||||
case <-sandbox.Stopped():
|
case <-sandbox.Stopped():
|
||||||
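The change from errors.Errorf to errors.Wrapf(ctx.Err(), ...) keeps the underlying context.Canceled or DeadlineExceeded value available to callers through errors.Cause while still adding the sandbox-specific message. A small self-contained example of the difference:

package main

import (
	"context"
	"fmt"

	"github.com/pkg/errors"
)

func main() {
	ctx, cancel := context.WithCancel(context.Background())
	cancel()

	// Wrapping preserves the sentinel value for callers that unwrap the error.
	err := errors.Wrapf(ctx.Err(), "wait sandbox container %q is cancelled", "abc")
	fmt.Println(errors.Cause(err) == context.Canceled) // true

	// A plain Errorf would lose that information.
	plain := errors.Errorf("wait sandbox container %q is cancelled", "abc")
	fmt.Println(errors.Cause(plain) == context.Canceled) // false
}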
@ -137,3 +164,16 @@ func (c *criService) teardownPod(id string, path string, config *runtime.PodSand
|
|||||||
cni.WithLabels(labels),
|
cni.WithLabels(labels),
|
||||||
cni.WithCapabilityPortMap(toCNIPortMappings(config.GetPortMappings())))
|
cni.WithCapabilityPortMap(toCNIPortMappings(config.GetPortMappings())))
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// cleanupUnknownSandbox cleans up a stopped sandbox in unknown state.
|
||||||
|
func cleanupUnknownSandbox(ctx context.Context, id string, status containerd.Status,
|
||||||
|
sandbox sandboxstore.Sandbox) error {
|
||||||
|
// Reuse handleSandboxExit to do the cleanup.
|
||||||
|
return handleSandboxExit(ctx, &eventtypes.TaskExit{
|
||||||
|
ContainerID: id,
|
||||||
|
ID: id,
|
||||||
|
Pid: 0,
|
||||||
|
ExitStatus: status.ExitStatus,
|
||||||
|
ExitedAt: status.ExitTime,
|
||||||
|
}, sandbox)
|
||||||
|
}
|
||||||
|
2
vendor/github.com/containerd/cri/pkg/server/service.go
generated
vendored
2
vendor/github.com/containerd/cri/pkg/server/service.go
generated
vendored
@ -159,7 +159,7 @@ func NewCRIService(config criconfig.Config, client *containerd.Client) (CRIServi
|
|||||||
logrus.WithError(err).Error("Failed to load cni during init, please check CRI plugin status before setting up network for pods")
|
logrus.WithError(err).Error("Failed to load cni during init, please check CRI plugin status before setting up network for pods")
|
||||||
}
|
}
|
||||||
// prepare streaming server
|
// prepare streaming server
|
||||||
c.streamServer, err = newStreamServer(c, config.StreamServerAddress, config.StreamServerPort)
|
c.streamServer, err = newStreamServer(c, config.StreamServerAddress, config.StreamServerPort, config.StreamIdleTimeout)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, errors.Wrap(err, "failed to create stream server")
|
return nil, errors.Wrap(err, "failed to create stream server")
|
||||||
}
|
}
|
||||||
|
10
vendor/github.com/containerd/cri/pkg/server/streaming.go
generated
vendored
10
vendor/github.com/containerd/cri/pkg/server/streaming.go
generated
vendored
@ -22,6 +22,7 @@ import (
|
|||||||
"math"
|
"math"
|
||||||
"net"
|
"net"
|
||||||
"os"
|
"os"
|
||||||
|
"time"
|
||||||
|
|
||||||
"github.com/pkg/errors"
|
"github.com/pkg/errors"
|
||||||
k8snet "k8s.io/apimachinery/pkg/util/net"
|
k8snet "k8s.io/apimachinery/pkg/util/net"
|
||||||
@ -64,7 +65,7 @@ func getStreamListenerMode(c *criService) (streamListenerMode, error) {
|
|||||||
return withoutTLS, nil
|
return withoutTLS, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func newStreamServer(c *criService, addr, port string) (streaming.Server, error) {
|
func newStreamServer(c *criService, addr, port, streamIdleTimeout string) (streaming.Server, error) {
|
||||||
if addr == "" {
|
if addr == "" {
|
||||||
a, err := k8snet.ChooseBindAddress(nil)
|
a, err := k8snet.ChooseBindAddress(nil)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
@ -73,6 +74,13 @@ func newStreamServer(c *criService, addr, port string) (streaming.Server, error)
|
|||||||
addr = a.String()
|
addr = a.String()
|
||||||
}
|
}
|
||||||
config := streaming.DefaultConfig
|
config := streaming.DefaultConfig
|
||||||
|
if streamIdleTimeout != "" {
|
||||||
|
var err error
|
||||||
|
config.StreamIdleTimeout, err = time.ParseDuration(streamIdleTimeout)
|
||||||
|
if err != nil {
|
||||||
|
return nil, errors.Wrap(err, "invalid stream idle timeout")
|
||||||
|
}
|
||||||
|
}
|
||||||
config.Addr = net.JoinHostPort(addr, port)
|
config.Addr = net.JoinHostPort(addr, port)
|
||||||
run := newStreamRuntime(c)
|
run := newStreamRuntime(c)
|
||||||
tlsMode, err := getStreamListenerMode(c)
|
tlsMode, err := getStreamListenerMode(c)
|
||||||
|
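The new streamIdleTimeout parameter is an ordinary Go duration string parsed with time.ParseDuration before it overrides the streaming server default; the name of the corresponding containerd config field is not shown here, so treat any TOML key as an assumption and check the released config docs. A quick check of which values parse:

package main

import (
	"fmt"
	"time"
)

func main() {
	// Values like "4h", "30m" or "90s" parse; a bare number does not.
	for _, v := range []string{"4h", "30m", "90s", "10"} {
		d, err := time.ParseDuration(v)
		fmt.Printf("%-4s -> %v, err=%v\n", v, d, err)
	}
}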
3
vendor/github.com/containerd/cri/pkg/store/container/container.go
generated
vendored
3
vendor/github.com/containerd/cri/pkg/store/container/container.go
generated
vendored
@ -36,7 +36,8 @@ type Container struct {
|
|||||||
Status StatusStorage
|
Status StatusStorage
|
||||||
// Container is the containerd container client.
|
// Container is the containerd container client.
|
||||||
Container containerd.Container
|
Container containerd.Container
|
||||||
// Container IO
|
// Container IO.
|
||||||
|
// IO could only be nil when the container is in unknown state.
|
||||||
IO *cio.ContainerIO
|
IO *cio.ContainerIO
|
||||||
// StopCh is used to propagate the stop information of the container.
|
// StopCh is used to propagate the stop information of the container.
|
||||||
*store.StopCh
|
*store.StopCh
|
||||||
|
38
vendor/github.com/containerd/cri/pkg/store/container/status.go
generated
vendored
38
vendor/github.com/containerd/cri/pkg/store/container/status.go
generated
vendored
@ -23,11 +23,43 @@ import (
|
|||||||
"path/filepath"
|
"path/filepath"
|
||||||
"sync"
|
"sync"
|
||||||
|
|
||||||
"github.com/docker/docker/pkg/ioutils"
|
"github.com/containerd/continuity"
|
||||||
"github.com/pkg/errors"
|
"github.com/pkg/errors"
|
||||||
runtime "k8s.io/kubernetes/pkg/kubelet/apis/cri/runtime/v1alpha2"
|
runtime "k8s.io/kubernetes/pkg/kubelet/apis/cri/runtime/v1alpha2"
|
||||||
)
|
)
|
||||||
|
|
||||||
|
// The container state machine in the CRI plugin:
|
||||||
|
//
|
||||||
|
// + +
|
||||||
|
// | |
|
||||||
|
// | Create | Load
|
||||||
|
// | |
|
||||||
|
// +----v----+ |
|
||||||
|
// | | |
|
||||||
|
// | CREATED <---------+-----------+
|
||||||
|
// | | | |
|
||||||
|
// +----+----- | |
|
||||||
|
// | | |
|
||||||
|
// | Start | |
|
||||||
|
// | | |
|
||||||
|
// +----v----+ | |
|
||||||
|
// Exec +--------+ | | |
|
||||||
|
// Attach | | RUNNING <---------+ |
|
||||||
|
// LogReopen +--------> | | |
|
||||||
|
// +----+----+ | |
|
||||||
|
// | | |
|
||||||
|
// | Stop/Exit | |
|
||||||
|
// | | |
|
||||||
|
// +----v----+ | |
|
||||||
|
// | <---------+ +----v----+
|
||||||
|
// | EXITED | | |
|
||||||
|
// | <----------------+ UNKNOWN |
|
||||||
|
// +----+----+ Stop | |
|
||||||
|
// | +---------+
|
||||||
|
// | Remove
|
||||||
|
// v
|
||||||
|
// DELETED
|
||||||
|
|
||||||
// statusVersion is current version of container status.
|
// statusVersion is current version of container status.
|
||||||
const statusVersion = "v1" // nolint
|
const statusVersion = "v1" // nolint
|
||||||
|
|
||||||
@ -128,7 +160,7 @@ func StoreStatus(root, id string, status Status) (StatusStorage, error) {
|
|||||||
return nil, errors.Wrap(err, "failed to encode status")
|
return nil, errors.Wrap(err, "failed to encode status")
|
||||||
}
|
}
|
||||||
path := filepath.Join(root, "status")
|
path := filepath.Join(root, "status")
|
||||||
if err := ioutils.AtomicWriteFile(path, data, 0600); err != nil {
|
if err := continuity.AtomicWriteFile(path, data, 0600); err != nil {
|
||||||
return nil, errors.Wrapf(err, "failed to checkpoint status to %q", path)
|
return nil, errors.Wrapf(err, "failed to checkpoint status to %q", path)
|
||||||
}
|
}
|
||||||
return &statusStorage{
|
return &statusStorage{
|
||||||
@ -177,7 +209,7 @@ func (s *statusStorage) UpdateSync(u UpdateFunc) error {
|
|||||||
if err != nil {
|
if err != nil {
|
||||||
return errors.Wrap(err, "failed to encode status")
|
return errors.Wrap(err, "failed to encode status")
|
||||||
}
|
}
|
||||||
if err := ioutils.AtomicWriteFile(s.path, data, 0600); err != nil {
|
if err := continuity.AtomicWriteFile(s.path, data, 0600); err != nil {
|
||||||
return errors.Wrapf(err, "failed to checkpoint status to %q", s.path)
|
return errors.Wrapf(err, "failed to checkpoint status to %q", s.path)
|
||||||
}
|
}
|
||||||
s.status = newStatus
|
s.status = newStatus
|
||||||
|
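The switch from docker's ioutils.AtomicWriteFile to continuity.AtomicWriteFile (and the removal of the vendored ioutils package further down) keeps the same contract: write to a temporary file, sync it, then rename it over the destination so readers never observe a half-written status file. A simplified sketch of that write-sync-rename sequence, not the vendored code itself:

package example

import (
	"io/ioutil"
	"os"
	"path/filepath"
)

// atomicWriteFile is a simplified illustration of the write-then-rename
// technique used by continuity.AtomicWriteFile; it is not the vendored code.
func atomicWriteFile(filename string, data []byte, perm os.FileMode) error {
	tmp, err := ioutil.TempFile(filepath.Dir(filename), ".tmp-"+filepath.Base(filename))
	if err != nil {
		return err
	}
	defer os.Remove(tmp.Name()) // best-effort; a no-op once the rename has happened

	if _, err := tmp.Write(data); err != nil {
		tmp.Close()
		return err
	}
	if err := tmp.Sync(); err != nil { // flush before the rename makes it visible
		tmp.Close()
		return err
	}
	if err := tmp.Close(); err != nil {
		return err
	}
	if err := os.Chmod(tmp.Name(), perm); err != nil {
		return err
	}
	return os.Rename(tmp.Name(), filename)
}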
4
vendor/github.com/containerd/cri/pkg/store/sandbox/sandbox.go
generated
vendored
4
vendor/github.com/containerd/cri/pkg/store/sandbox/sandbox.go
generated
vendored
@ -93,7 +93,7 @@ func (s *Store) Get(id string) (Sandbox, error) {
|
|||||||
if err != nil {
|
if err != nil {
|
||||||
return sb, err
|
return sb, err
|
||||||
}
|
}
|
||||||
if sb.Status.Get().State == StateUnknown {
|
if sb.Status.Get().State == StateInit {
|
||||||
return Sandbox{}, store.ErrNotExist
|
return Sandbox{}, store.ErrNotExist
|
||||||
}
|
}
|
||||||
return sb, nil
|
return sb, nil
|
||||||
@ -123,7 +123,7 @@ func (s *Store) List() []Sandbox {
|
|||||||
defer s.lock.RUnlock()
|
defer s.lock.RUnlock()
|
||||||
var sandboxes []Sandbox
|
var sandboxes []Sandbox
|
||||||
for _, sb := range s.sandboxes {
|
for _, sb := range s.sandboxes {
|
||||||
if sb.Status.Get().State == StateUnknown {
|
if sb.Status.Get().State == StateInit {
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
sandboxes = append(sandboxes, sb)
|
sandboxes = append(sandboxes, sb)
|
||||||
|
49
vendor/github.com/containerd/cri/pkg/store/sandbox/status.go
generated
vendored
49
vendor/github.com/containerd/cri/pkg/store/sandbox/status.go
generated
vendored
@ -21,16 +21,52 @@ import (
|
|||||||
"time"
|
"time"
|
||||||
)
|
)
|
||||||
|
|
||||||
|
// The sandbox state machine in the CRI plugin:
|
||||||
|
// + +
|
||||||
|
// | |
|
||||||
|
// | Create(Run) | Load
|
||||||
|
// | |
|
||||||
|
// Start +----v----+ |
|
||||||
|
// (failed) | | |
|
||||||
|
// +-------------+ INIT | +-----------+
|
||||||
|
// | | | | |
|
||||||
|
// | +----+----+ | |
|
||||||
|
// | | | |
|
||||||
|
// | | Start(Run) | |
|
||||||
|
// | | | |
|
||||||
|
// | PortForward +----v----+ | |
|
||||||
|
// | +------+ | | |
|
||||||
|
// | | | READY <---------+ |
|
||||||
|
// | +------> | | |
|
||||||
|
// | +----+----+ | |
|
||||||
|
// | | | |
|
||||||
|
// | | Stop/Exit | |
|
||||||
|
// | | | |
|
||||||
|
// | +----v----+ | |
|
||||||
|
// | | <---------+ +----v----+
|
||||||
|
// | | NOTREADY| | |
|
||||||
|
// | | <----------------+ UNKNOWN |
|
||||||
|
// | +----+----+ Stop | |
|
||||||
|
// | | +---------+
|
||||||
|
// | | Remove
|
||||||
|
// | v
|
||||||
|
// +-------------> DELETED
|
||||||
|
|
||||||
// State is the sandbox state we use in containerd/cri.
|
// State is the sandbox state we use in containerd/cri.
|
||||||
// It has unknown state defined.
|
// It includes init and unknown, which are internal states not defined in CRI.
|
||||||
|
// The state mapping from internal states to CRI states:
|
||||||
|
// * ready -> ready
|
||||||
|
// * not ready -> not ready
|
||||||
|
// * init -> not exist
|
||||||
|
// * unknown -> not ready
|
||||||
type State uint32
|
type State uint32
|
||||||
|
|
||||||
const (
|
const (
|
||||||
// StateUnknown is unknown state of sandbox. Sandbox
|
// StateInit is init state of sandbox. Sandbox
|
||||||
// is in unknown state before its corresponding sandbox container
|
// is in init state before its corresponding sandbox container
|
||||||
// is created. Sandbox in unknown state should be ignored by most
|
// is created. Sandbox in init state should be ignored by most
|
||||||
// functions, unless the caller needs to update sandbox state.
|
// functions, unless the caller needs to update sandbox state.
|
||||||
StateUnknown State = iota
|
StateInit State = iota
|
||||||
// StateReady is ready state, it means sandbox container
|
// StateReady is ready state, it means sandbox container
|
||||||
// is running.
|
// is running.
|
||||||
StateReady
|
StateReady
|
||||||
@ -40,6 +76,9 @@ const (
|
|||||||
// cleanup resources other than sandbox container, e.g. network namespace.
|
// cleanup resources other than sandbox container, e.g. network namespace.
|
||||||
// This is an assumption made in CRI.
|
// This is an assumption made in CRI.
|
||||||
StateNotReady
|
StateNotReady
|
||||||
|
// StateUnknown is unknown state. Sandbox only goes
|
||||||
|
// into unknown state when its status fails to be loaded.
|
||||||
|
StateUnknown
|
||||||
)
|
)
|
||||||
|
|
||||||
// Status is the status of a sandbox.
|
// Status is the status of a sandbox.
|
||||||
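The comments above spell out how the plugin's internal sandbox states map onto what CRI reports: ready stays ready, not ready and unknown both surface as NOT_READY, and init does not surface at all. A small hedged sketch of that mapping (the real conversion lives in the plugin's status and listing code):

package example

import runtime "k8s.io/kubernetes/pkg/kubelet/apis/cri/runtime/v1alpha2"

// State mirrors the internal sandbox states named in the comment above.
type State uint32

const (
	StateInit State = iota
	StateReady
	StateNotReady
	StateUnknown
)

// toCRIState maps an internal state to the CRI PodSandboxState. The second
// return value is false for StateInit, which is never reported to CRI.
func toCRIState(s State) (runtime.PodSandboxState, bool) {
	switch s {
	case StateReady:
		return runtime.PodSandboxState_SANDBOX_READY, true
	case StateNotReady, StateUnknown:
		return runtime.PodSandboxState_SANDBOX_NOTREADY, true
	default: // StateInit: treated as "does not exist"
		return runtime.PodSandboxState_SANDBOX_NOTREADY, false
	}
}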
|
23
vendor/github.com/containerd/cri/pkg/util/image.go
generated
vendored
23
vendor/github.com/containerd/cri/pkg/util/image.go
generated
vendored
@ -26,25 +26,8 @@ import (
|
|||||||
// and digest, the function returns digested reference, e.g. docker.io/library/busybox:latest@
|
// and digest, the function returns digested reference, e.g. docker.io/library/busybox:latest@
|
||||||
// sha256:7cc4b5aefd1d0cadf8d97d4350462ba51c694ebca145b08d7d41b41acc8db5aa will be returned as
|
// sha256:7cc4b5aefd1d0cadf8d97d4350462ba51c694ebca145b08d7d41b41acc8db5aa will be returned as
|
||||||
// docker.io/library/busybox@sha256:7cc4b5aefd1d0cadf8d97d4350462ba51c694ebca145b08d7d41b41acc8db5aa.
|
// docker.io/library/busybox@sha256:7cc4b5aefd1d0cadf8d97d4350462ba51c694ebca145b08d7d41b41acc8db5aa.
|
||||||
|
//
|
||||||
|
// Deprecated: use github.com/docker/reference.ParseDockerRef() instead
|
||||||
func NormalizeImageRef(ref string) (reference.Named, error) {
|
func NormalizeImageRef(ref string) (reference.Named, error) {
|
||||||
named, err := reference.ParseNormalizedNamed(ref)
|
return reference.ParseDockerRef(ref)
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
if _, ok := named.(reference.NamedTagged); ok {
|
|
||||||
if canonical, ok := named.(reference.Canonical); ok {
|
|
||||||
// The reference is both tagged and digested, only
|
|
||||||
// return digested.
|
|
||||||
newNamed, err := reference.WithName(canonical.Name())
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
newCanonical, err := reference.WithDigest(newNamed, canonical.Digest())
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
return newCanonical, nil
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return reference.TagNameOnly(named), nil
|
|
||||||
}
|
}
|
||||||
|
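NormalizeImageRef now delegates to reference.ParseDockerRef, which folds the old tag-plus-digest handling into a single library call from docker/distribution's reference package (vendored above). A quick usage sketch showing the behaviour described in the function's comment:

package main

import (
	"fmt"

	"github.com/docker/distribution/reference"
)

func main() {
	refs := []string{
		"busybox",
		"busybox:latest@sha256:7cc4b5aefd1d0cadf8d97d4350462ba51c694ebca145b08d7d41b41acc8db5aa",
	}
	for _, r := range refs {
		named, err := reference.ParseDockerRef(r)
		if err != nil {
			fmt.Println(r, "->", err)
			continue
		}
		// A bare name gains the default registry and :latest tag; a tagged and
		// digested reference keeps only the digest, as the comment above notes.
		fmt.Println(r, "->", named.String())
	}
}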
43
vendor/github.com/containerd/cri/vendor.conf
generated
vendored
43
vendor/github.com/containerd/cri/vendor.conf
generated
vendored
@ -1,20 +1,19 @@
|
|||||||
github.com/beorn7/perks 4c0e84591b9aa9e6dcfdf3e020114cd81f89d5f9
|
github.com/beorn7/perks 4c0e84591b9aa9e6dcfdf3e020114cd81f89d5f9
|
||||||
github.com/blang/semver v3.1.0
|
|
||||||
github.com/BurntSushi/toml a368813c5e648fee92e5f6c30e3944ff9d5e8895
|
github.com/BurntSushi/toml a368813c5e648fee92e5f6c30e3944ff9d5e8895
|
||||||
github.com/containerd/cgroups 5e610833b72089b37d0e615de9a92dfc043757c2
|
github.com/containerd/cgroups 1152b960fcee041f50df15cdc67c29dbccf801ef
|
||||||
github.com/containerd/console c12b1e7919c14469339a5d38f2f8ed9b64a9de23
|
github.com/containerd/console c12b1e7919c14469339a5d38f2f8ed9b64a9de23
|
||||||
github.com/containerd/containerd 6937c5a3ba8280edff9e9030767e3b0cb742581c
|
github.com/containerd/containerd f2a20ead833f8caf3ffc12be058d6ce668b4ebed
|
||||||
github.com/containerd/continuity bd77b46c8352f74eb12c85bdc01f4b90f69d66b4
|
github.com/containerd/continuity bd77b46c8352f74eb12c85bdc01f4b90f69d66b4
|
||||||
github.com/containerd/fifo 3d5202aec260678c48179c56f40e6f38a095738c
|
github.com/containerd/fifo 3d5202aec260678c48179c56f40e6f38a095738c
|
||||||
github.com/containerd/go-cni 40bcf8ec8acd7372be1d77031d585d5d8e561c90
|
github.com/containerd/go-cni 40bcf8ec8acd7372be1d77031d585d5d8e561c90
|
||||||
github.com/containerd/go-runc 5a6d9f37cfa36b15efba46dc7ea349fa9b7143c3
|
github.com/containerd/go-runc 5a6d9f37cfa36b15efba46dc7ea349fa9b7143c3
|
||||||
github.com/containerd/ttrpc 2a805f71863501300ae1976d29f0454ae003e85a
|
github.com/containerd/ttrpc f02858b1457c5ca3aaec3a0803eb0d59f96e41d6
|
||||||
github.com/containerd/typeurl a93fcdb778cd272c6e9b3028b2f42d813e785d40
|
github.com/containerd/typeurl a93fcdb778cd272c6e9b3028b2f42d813e785d40
|
||||||
github.com/containernetworking/cni v0.6.0
|
github.com/containernetworking/cni v0.6.0
|
||||||
github.com/containernetworking/plugins v0.7.0
|
github.com/containernetworking/plugins v0.7.5
|
||||||
github.com/coreos/go-systemd v14
|
github.com/coreos/go-systemd v14
|
||||||
github.com/davecgh/go-spew v1.1.0
|
github.com/davecgh/go-spew v1.1.0
|
||||||
github.com/docker/distribution b38e5838b7b2f2ad48e06ec4b500011976080621
|
github.com/docker/distribution 0d3efadf0154c2b8a4e7b6621fff9809655cc580
|
||||||
github.com/docker/docker 86f080cff0914e9694068ed78d503701667c4c00
|
github.com/docker/docker 86f080cff0914e9694068ed78d503701667c4c00
|
||||||
github.com/docker/go-events 9461782956ad83b30282bf90e31fa6a70c255ba9
|
github.com/docker/go-events 9461782956ad83b30282bf90e31fa6a70c255ba9
|
||||||
github.com/docker/go-metrics 4ea375f7759c82740c893fc030bc37088d2ec098
|
github.com/docker/go-metrics 4ea375f7759c82740c893fc030bc37088d2ec098
|
||||||
@ -27,20 +26,17 @@ github.com/gogo/protobuf v1.0.0
|
|||||||
github.com/golang/protobuf v1.1.0
|
github.com/golang/protobuf v1.1.0
|
||||||
github.com/google/gofuzz 44d81051d367757e1c7c6a5a86423ece9afcf63c
|
github.com/google/gofuzz 44d81051d367757e1c7c6a5a86423ece9afcf63c
|
||||||
github.com/grpc-ecosystem/go-grpc-prometheus v1.1
|
github.com/grpc-ecosystem/go-grpc-prometheus v1.1
|
||||||
github.com/hashicorp/errwrap 7554cd9344cec97297fa6649b055a8c98c2a1e55
|
|
||||||
github.com/hashicorp/go-multierror ed905158d87462226a13fe39ddf685ea65f1c11f
|
|
||||||
github.com/json-iterator/go 1.1.5
|
github.com/json-iterator/go 1.1.5
|
||||||
github.com/matttproud/golang_protobuf_extensions v1.0.0
|
github.com/matttproud/golang_protobuf_extensions v1.0.0
|
||||||
github.com/Microsoft/go-winio v0.4.11
|
github.com/Microsoft/go-winio c599b533b43b1363d7d7c6cfda5ede70ed73ff13
|
||||||
github.com/Microsoft/hcsshim v0.8.2
|
github.com/Microsoft/hcsshim 8abdbb8205e4192c68b5f84c31197156f31be517
|
||||||
github.com/modern-go/concurrent 1.0.3
|
github.com/modern-go/concurrent 1.0.3
|
||||||
github.com/modern-go/reflect2 1.0.1
|
github.com/modern-go/reflect2 1.0.1
|
||||||
github.com/opencontainers/go-digest c9281466c8b2f606084ac71339773efd177436e7
|
github.com/opencontainers/go-digest c9281466c8b2f606084ac71339773efd177436e7
|
||||||
github.com/opencontainers/image-spec v1.0.1
|
github.com/opencontainers/image-spec v1.0.1
|
||||||
github.com/opencontainers/runc v1.0.0-rc6
|
github.com/opencontainers/runc 12f6a991201fdb8f82579582d5e00e28fba06d0a
|
||||||
github.com/opencontainers/runtime-spec eba862dc2470385a233c7507392675cbeadf7353
|
github.com/opencontainers/runtime-spec eba862dc2470385a233c7507392675cbeadf7353
|
||||||
github.com/opencontainers/runtime-tools fb101d5d42ab9c040f7d0a004e78336e5d5cb197
|
github.com/opencontainers/selinux v1.2.1
|
||||||
github.com/opencontainers/selinux b6fa367ed7f534f9ba25391cc2d467085dbb445a
|
|
||||||
github.com/pkg/errors v0.8.0
|
github.com/pkg/errors v0.8.0
|
||||||
github.com/pmezard/go-difflib v1.0.0
|
github.com/pmezard/go-difflib v1.0.0
|
||||||
github.com/prometheus/client_golang f4fb1b73fb099f396a7f0036bf86aa8def4ed823
|
github.com/prometheus/client_golang f4fb1b73fb099f396a7f0036bf86aa8def4ed823
|
||||||
@ -53,26 +49,23 @@ github.com/stretchr/testify v1.1.4
|
|||||||
github.com/syndtr/gocapability db04d3cc01c8b54962a58ec7e491717d06cfcc16
|
github.com/syndtr/gocapability db04d3cc01c8b54962a58ec7e491717d06cfcc16
|
||||||
github.com/tchap/go-patricia v2.2.6
|
github.com/tchap/go-patricia v2.2.6
|
||||||
github.com/urfave/cli 7bc6a0acffa589f415f88aca16cc1de5ffd66f9c
|
github.com/urfave/cli 7bc6a0acffa589f415f88aca16cc1de5ffd66f9c
|
||||||
github.com/xeipuuv/gojsonpointer 4e3ac2762d5f479393488629ee9370b50873b3a6
|
|
||||||
github.com/xeipuuv/gojsonreference bd5ef7bd5415a7ac448318e64f11a24cd21e594b
|
|
||||||
github.com/xeipuuv/gojsonschema 1d523034197ff1f222f6429836dd36a2457a1874
|
|
||||||
go.etcd.io/bbolt v1.3.1-etcd.8
|
go.etcd.io/bbolt v1.3.1-etcd.8
|
||||||
golang.org/x/crypto 49796115aa4b964c318aad4f3084fdb41e9aa067
|
golang.org/x/crypto 49796115aa4b964c318aad4f3084fdb41e9aa067
|
||||||
golang.org/x/net b3756b4b77d7b13260a0a2ec658753cf48922eac
|
golang.org/x/net b3756b4b77d7b13260a0a2ec658753cf48922eac
|
||||||
golang.org/x/oauth2 a6bd8cefa1811bd24b86f8902872e4e8225f74c4
|
golang.org/x/oauth2 a6bd8cefa1811bd24b86f8902872e4e8225f74c4
|
||||||
golang.org/x/sync 450f422ab23cf9881c94e2db30cac0eb1b7cf80c
|
golang.org/x/sync 42b317875d0fa942474b76e1b46a6060d720ae6e
|
||||||
golang.org/x/sys 1b2967e3c290b7c545b3db0deeda16e9be4f98a2 https://github.com/golang/sys
|
golang.org/x/sys d455e41777fca6e8a5a79e34a14b8368bc11d9ba https://github.com/golang/sys
|
||||||
golang.org/x/text 19e51611da83d6be54ddafce4a4af510cb3e9ea4
|
golang.org/x/text 19e51611da83d6be54ddafce4a4af510cb3e9ea4
|
||||||
golang.org/x/time f51c12702a4d776e4c1fa9b0fabab841babae631
|
golang.org/x/time f51c12702a4d776e4c1fa9b0fabab841babae631
|
||||||
google.golang.org/genproto d80a6e20e776b0b17a324d0ba1ab50a39c8e8944
|
google.golang.org/genproto d80a6e20e776b0b17a324d0ba1ab50a39c8e8944
|
||||||
google.golang.org/grpc v1.12.0
|
google.golang.org/grpc v1.12.0
|
||||||
gopkg.in/inf.v0 3887ee99ecf07df5b447e9b00d9c0b2adaa9f3e4
|
gopkg.in/inf.v0 3887ee99ecf07df5b447e9b00d9c0b2adaa9f3e4
|
||||||
gopkg.in/yaml.v2 v2.2.1
|
gopkg.in/yaml.v2 v2.2.1
|
||||||
k8s.io/api kubernetes-1.13.0
|
k8s.io/api kubernetes-1.15.0-alpha.0
|
||||||
k8s.io/apimachinery kubernetes-1.13.0
|
k8s.io/apimachinery kubernetes-1.15.0-alpha.0
|
||||||
k8s.io/apiserver kubernetes-1.13.0
|
k8s.io/apiserver kubernetes-1.15.0-alpha.0
|
||||||
k8s.io/client-go kubernetes-1.13.0
|
k8s.io/client-go kubernetes-1.15.0-alpha.0
|
||||||
k8s.io/klog 8139d8cb77af419532b33dfa7dd09fbc5f1d344f
|
k8s.io/klog 8145543d67ada0bd556af97faeeb8a65a2651c98
|
||||||
k8s.io/kubernetes v1.13.0
|
k8s.io/kubernetes v1.15.0-alpha.0
|
||||||
k8s.io/utils 0d26856f57b32ec3398579285e5c8a2bfe8c5243
|
k8s.io/utils c2654d5206da6b7b6ace12841e8f359bb89b443c
|
||||||
sigs.k8s.io/yaml v1.1.0
|
sigs.k8s.io/yaml v1.1.0
|
||||||
|
51
vendor/github.com/docker/docker/pkg/ioutils/buffer.go
generated
vendored
51
vendor/github.com/docker/docker/pkg/ioutils/buffer.go
generated
vendored
@ -1,51 +0,0 @@
|
|||||||
package ioutils
|
|
||||||
|
|
||||||
import (
|
|
||||||
"errors"
|
|
||||||
"io"
|
|
||||||
)
|
|
||||||
|
|
||||||
var errBufferFull = errors.New("buffer is full")
|
|
||||||
|
|
||||||
type fixedBuffer struct {
|
|
||||||
buf []byte
|
|
||||||
pos int
|
|
||||||
lastRead int
|
|
||||||
}
|
|
||||||
|
|
||||||
func (b *fixedBuffer) Write(p []byte) (int, error) {
|
|
||||||
n := copy(b.buf[b.pos:cap(b.buf)], p)
|
|
||||||
b.pos += n
|
|
||||||
|
|
||||||
if n < len(p) {
|
|
||||||
if b.pos == cap(b.buf) {
|
|
||||||
return n, errBufferFull
|
|
||||||
}
|
|
||||||
return n, io.ErrShortWrite
|
|
||||||
}
|
|
||||||
return n, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (b *fixedBuffer) Read(p []byte) (int, error) {
|
|
||||||
n := copy(p, b.buf[b.lastRead:b.pos])
|
|
||||||
b.lastRead += n
|
|
||||||
return n, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (b *fixedBuffer) Len() int {
|
|
||||||
return b.pos - b.lastRead
|
|
||||||
}
|
|
||||||
|
|
||||||
func (b *fixedBuffer) Cap() int {
|
|
||||||
return cap(b.buf)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (b *fixedBuffer) Reset() {
|
|
||||||
b.pos = 0
|
|
||||||
b.lastRead = 0
|
|
||||||
b.buf = b.buf[:0]
|
|
||||||
}
|
|
||||||
|
|
||||||
func (b *fixedBuffer) String() string {
|
|
||||||
return string(b.buf[b.lastRead:b.pos])
|
|
||||||
}
|
|
186
vendor/github.com/docker/docker/pkg/ioutils/bytespipe.go
generated
vendored
186
vendor/github.com/docker/docker/pkg/ioutils/bytespipe.go
generated
vendored
@ -1,186 +0,0 @@
|
|||||||
package ioutils
|
|
||||||
|
|
||||||
import (
|
|
||||||
"errors"
|
|
||||||
"io"
|
|
||||||
"sync"
|
|
||||||
)
|
|
||||||
|
|
||||||
// maxCap is the highest capacity to use in byte slices that buffer data.
|
|
||||||
const maxCap = 1e6
|
|
||||||
|
|
||||||
// minCap is the lowest capacity to use in byte slices that buffer data
|
|
||||||
const minCap = 64
|
|
||||||
|
|
||||||
// blockThreshold is the minimum number of bytes in the buffer which will cause
|
|
||||||
// a write to BytesPipe to block when allocating a new slice.
|
|
||||||
const blockThreshold = 1e6
|
|
||||||
|
|
||||||
var (
|
|
||||||
// ErrClosed is returned when Write is called on a closed BytesPipe.
|
|
||||||
ErrClosed = errors.New("write to closed BytesPipe")
|
|
||||||
|
|
||||||
bufPools = make(map[int]*sync.Pool)
|
|
||||||
bufPoolsLock sync.Mutex
|
|
||||||
)
|
|
||||||
|
|
||||||
// BytesPipe is io.ReadWriteCloser which works similarly to pipe(queue).
|
|
||||||
// All written data may be read at most once. Also, BytesPipe allocates
|
|
||||||
// and releases new byte slices to adjust to current needs, so the buffer
|
|
||||||
// won't be overgrown after peak loads.
|
|
||||||
type BytesPipe struct {
|
|
||||||
mu sync.Mutex
|
|
||||||
wait *sync.Cond
|
|
||||||
buf []*fixedBuffer
|
|
||||||
bufLen int
|
|
||||||
closeErr error // error to return from next Read. set to nil if not closed.
|
|
||||||
}
|
|
||||||
|
|
||||||
// NewBytesPipe creates new BytesPipe, initialized by specified slice.
|
|
||||||
// If buf is nil, then it will be initialized with slice which cap is 64.
|
|
||||||
// buf will be adjusted in a way that len(buf) == 0, cap(buf) == cap(buf).
|
|
||||||
func NewBytesPipe() *BytesPipe {
|
|
||||||
bp := &BytesPipe{}
|
|
||||||
bp.buf = append(bp.buf, getBuffer(minCap))
|
|
||||||
bp.wait = sync.NewCond(&bp.mu)
|
|
||||||
return bp
|
|
||||||
}
|
|
||||||
|
|
||||||
// Write writes p to BytesPipe.
|
|
||||||
// It can allocate new []byte slices in a process of writing.
|
|
||||||
func (bp *BytesPipe) Write(p []byte) (int, error) {
|
|
||||||
bp.mu.Lock()
|
|
||||||
|
|
||||||
written := 0
|
|
||||||
loop0:
|
|
||||||
for {
|
|
||||||
if bp.closeErr != nil {
|
|
||||||
bp.mu.Unlock()
|
|
||||||
return written, ErrClosed
|
|
||||||
}
|
|
||||||
|
|
||||||
if len(bp.buf) == 0 {
|
|
||||||
bp.buf = append(bp.buf, getBuffer(64))
|
|
||||||
}
|
|
||||||
// get the last buffer
|
|
||||||
b := bp.buf[len(bp.buf)-1]
|
|
||||||
|
|
||||||
n, err := b.Write(p)
|
|
||||||
written += n
|
|
||||||
bp.bufLen += n
|
|
||||||
|
|
||||||
// errBufferFull is an error we expect to get if the buffer is full
|
|
||||||
if err != nil && err != errBufferFull {
|
|
||||||
bp.wait.Broadcast()
|
|
||||||
bp.mu.Unlock()
|
|
||||||
return written, err
|
|
||||||
}
|
|
||||||
|
|
||||||
// if there was enough room to write all then break
|
|
||||||
if len(p) == n {
|
|
||||||
break
|
|
||||||
}
|
|
||||||
|
|
||||||
// more data: write to the next slice
|
|
||||||
p = p[n:]
|
|
||||||
|
|
||||||
// make sure the buffer doesn't grow too big from this write
|
|
||||||
for bp.bufLen >= blockThreshold {
|
|
||||||
bp.wait.Wait()
|
|
||||||
if bp.closeErr != nil {
|
|
||||||
continue loop0
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// add new byte slice to the buffers slice and continue writing
|
|
||||||
nextCap := b.Cap() * 2
|
|
||||||
if nextCap > maxCap {
|
|
||||||
nextCap = maxCap
|
|
||||||
}
|
|
||||||
bp.buf = append(bp.buf, getBuffer(nextCap))
|
|
||||||
}
|
|
||||||
bp.wait.Broadcast()
|
|
||||||
bp.mu.Unlock()
|
|
||||||
return written, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// CloseWithError causes further reads from a BytesPipe to return immediately.
|
|
||||||
func (bp *BytesPipe) CloseWithError(err error) error {
|
|
||||||
bp.mu.Lock()
|
|
||||||
if err != nil {
|
|
||||||
bp.closeErr = err
|
|
||||||
} else {
|
|
||||||
bp.closeErr = io.EOF
|
|
||||||
}
|
|
||||||
bp.wait.Broadcast()
|
|
||||||
bp.mu.Unlock()
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// Close causes further reads from a BytesPipe to return immediately.
|
|
||||||
func (bp *BytesPipe) Close() error {
|
|
||||||
return bp.CloseWithError(nil)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Read reads bytes from BytesPipe.
|
|
||||||
// Data could be read only once.
|
|
||||||
func (bp *BytesPipe) Read(p []byte) (n int, err error) {
|
|
||||||
bp.mu.Lock()
|
|
||||||
if bp.bufLen == 0 {
|
|
||||||
if bp.closeErr != nil {
|
|
||||||
bp.mu.Unlock()
|
|
||||||
return 0, bp.closeErr
|
|
||||||
}
|
|
||||||
bp.wait.Wait()
|
|
||||||
if bp.bufLen == 0 && bp.closeErr != nil {
|
|
||||||
err := bp.closeErr
|
|
||||||
bp.mu.Unlock()
|
|
||||||
return 0, err
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
for bp.bufLen > 0 {
|
|
||||||
b := bp.buf[0]
|
|
||||||
read, _ := b.Read(p) // ignore error since fixedBuffer doesn't really return an error
|
|
||||||
n += read
|
|
||||||
bp.bufLen -= read
|
|
||||||
|
|
||||||
if b.Len() == 0 {
|
|
||||||
// it's empty so return it to the pool and move to the next one
|
|
||||||
returnBuffer(b)
|
|
||||||
bp.buf[0] = nil
|
|
||||||
bp.buf = bp.buf[1:]
|
|
||||||
}
|
|
||||||
|
|
||||||
if len(p) == read {
|
|
||||||
break
|
|
||||||
}
|
|
||||||
|
|
||||||
p = p[read:]
|
|
||||||
}
|
|
||||||
|
|
||||||
bp.wait.Broadcast()
|
|
||||||
bp.mu.Unlock()
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
func returnBuffer(b *fixedBuffer) {
|
|
||||||
b.Reset()
|
|
||||||
bufPoolsLock.Lock()
|
|
||||||
pool := bufPools[b.Cap()]
|
|
||||||
bufPoolsLock.Unlock()
|
|
||||||
if pool != nil {
|
|
||||||
pool.Put(b)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func getBuffer(size int) *fixedBuffer {
|
|
||||||
bufPoolsLock.Lock()
|
|
||||||
pool, ok := bufPools[size]
|
|
||||||
if !ok {
|
|
||||||
pool = &sync.Pool{New: func() interface{} { return &fixedBuffer{buf: make([]byte, 0, size)} }}
|
|
||||||
bufPools[size] = pool
|
|
||||||
}
|
|
||||||
bufPoolsLock.Unlock()
|
|
||||||
return pool.Get().(*fixedBuffer)
|
|
||||||
}
|
|
162
vendor/github.com/docker/docker/pkg/ioutils/fswriters.go
generated
vendored
162
vendor/github.com/docker/docker/pkg/ioutils/fswriters.go
generated
vendored
@ -1,162 +0,0 @@
|
|||||||
package ioutils
|
|
||||||
|
|
||||||
import (
|
|
||||||
"io"
|
|
||||||
"io/ioutil"
|
|
||||||
"os"
|
|
||||||
"path/filepath"
|
|
||||||
)
|
|
||||||
|
|
||||||
// NewAtomicFileWriter returns WriteCloser so that writing to it writes to a
|
|
||||||
// temporary file and closing it atomically changes the temporary file to
|
|
||||||
// destination path. Writing and closing concurrently is not allowed.
|
|
||||||
func NewAtomicFileWriter(filename string, perm os.FileMode) (io.WriteCloser, error) {
|
|
||||||
f, err := ioutil.TempFile(filepath.Dir(filename), ".tmp-"+filepath.Base(filename))
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
abspath, err := filepath.Abs(filename)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
return &atomicFileWriter{
|
|
||||||
f: f,
|
|
||||||
fn: abspath,
|
|
||||||
perm: perm,
|
|
||||||
}, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// AtomicWriteFile atomically writes data to a file named by filename.
|
|
||||||
func AtomicWriteFile(filename string, data []byte, perm os.FileMode) error {
|
|
||||||
f, err := NewAtomicFileWriter(filename, perm)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
n, err := f.Write(data)
|
|
||||||
if err == nil && n < len(data) {
|
|
||||||
err = io.ErrShortWrite
|
|
||||||
f.(*atomicFileWriter).writeErr = err
|
|
||||||
}
|
|
||||||
if err1 := f.Close(); err == nil {
|
|
||||||
err = err1
|
|
||||||
}
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
type atomicFileWriter struct {
|
|
||||||
f *os.File
|
|
||||||
fn string
|
|
||||||
writeErr error
|
|
||||||
perm os.FileMode
|
|
||||||
}
|
|
||||||
|
|
||||||
func (w *atomicFileWriter) Write(dt []byte) (int, error) {
|
|
||||||
n, err := w.f.Write(dt)
|
|
||||||
if err != nil {
|
|
||||||
w.writeErr = err
|
|
||||||
}
|
|
||||||
return n, err
|
|
||||||
}
|
|
||||||
|
|
||||||
func (w *atomicFileWriter) Close() (retErr error) {
|
|
||||||
defer func() {
|
|
||||||
if retErr != nil || w.writeErr != nil {
|
|
||||||
os.Remove(w.f.Name())
|
|
||||||
}
|
|
||||||
}()
|
|
||||||
if err := w.f.Sync(); err != nil {
|
|
||||||
w.f.Close()
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
if err := w.f.Close(); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
if err := os.Chmod(w.f.Name(), w.perm); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
if w.writeErr == nil {
|
|
||||||
return os.Rename(w.f.Name(), w.fn)
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// AtomicWriteSet is used to atomically write a set
|
|
||||||
// of files and ensure they are visible at the same time.
|
|
||||||
// Must be committed to a new directory.
|
|
||||||
type AtomicWriteSet struct {
|
|
||||||
root string
|
|
||||||
}
|
|
||||||
|
|
||||||
// NewAtomicWriteSet creates a new atomic write set to
|
|
||||||
// atomically create a set of files. The given directory
|
|
||||||
// is used as the base directory for storing files before
|
|
||||||
// commit. If no temporary directory is given the system
|
|
||||||
// default is used.
|
|
||||||
func NewAtomicWriteSet(tmpDir string) (*AtomicWriteSet, error) {
|
|
||||||
td, err := ioutil.TempDir(tmpDir, "write-set-")
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
return &AtomicWriteSet{
|
|
||||||
root: td,
|
|
||||||
}, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// WriteFile writes a file to the set, guaranteeing the file
|
|
||||||
// has been synced.
|
|
||||||
func (ws *AtomicWriteSet) WriteFile(filename string, data []byte, perm os.FileMode) error {
|
|
||||||
f, err := ws.FileWriter(filename, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, perm)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
n, err := f.Write(data)
|
|
||||||
if err == nil && n < len(data) {
|
|
||||||
err = io.ErrShortWrite
|
|
||||||
}
|
|
||||||
if err1 := f.Close(); err == nil {
|
|
||||||
err = err1
|
|
||||||
}
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
type syncFileCloser struct {
|
|
||||||
*os.File
|
|
||||||
}
|
|
||||||
|
|
||||||
func (w syncFileCloser) Close() error {
|
|
||||||
err := w.File.Sync()
|
|
||||||
if err1 := w.File.Close(); err == nil {
|
|
||||||
err = err1
|
|
||||||
}
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
// FileWriter opens a file writer inside the set. The file
|
|
||||||
// should be synced and closed before calling commit.
|
|
||||||
func (ws *AtomicWriteSet) FileWriter(name string, flag int, perm os.FileMode) (io.WriteCloser, error) {
|
|
||||||
f, err := os.OpenFile(filepath.Join(ws.root, name), flag, perm)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
return syncFileCloser{f}, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// Cancel cancels the set and removes all temporary data
|
|
||||||
// created in the set.
|
|
||||||
func (ws *AtomicWriteSet) Cancel() error {
|
|
||||||
return os.RemoveAll(ws.root)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Commit moves all created files to the target directory. The
|
|
||||||
// target directory must not exist and the parent of the target
|
|
||||||
// directory must exist.
|
|
||||||
func (ws *AtomicWriteSet) Commit(target string) error {
|
|
||||||
return os.Rename(ws.root, target)
|
|
||||||
}
|
|
||||||
|
|
||||||
// String returns the location the set is writing to.
|
|
||||||
func (ws *AtomicWriteSet) String() string {
|
|
||||||
return ws.root
|
|
||||||
}
|
|
154
vendor/github.com/docker/docker/pkg/ioutils/readers.go
generated
vendored
154
vendor/github.com/docker/docker/pkg/ioutils/readers.go
generated
vendored
@ -1,154 +0,0 @@
|
|||||||
package ioutils
|
|
||||||
|
|
||||||
import (
|
|
||||||
"crypto/sha256"
|
|
||||||
"encoding/hex"
|
|
||||||
"io"
|
|
||||||
|
|
||||||
"golang.org/x/net/context"
|
|
||||||
)
|
|
||||||
|
|
||||||
type readCloserWrapper struct {
|
|
||||||
io.Reader
|
|
||||||
closer func() error
|
|
||||||
}
|
|
||||||
|
|
||||||
func (r *readCloserWrapper) Close() error {
|
|
||||||
return r.closer()
|
|
||||||
}
|
|
||||||
|
|
||||||
// NewReadCloserWrapper returns a new io.ReadCloser.
|
|
||||||
func NewReadCloserWrapper(r io.Reader, closer func() error) io.ReadCloser {
|
|
||||||
return &readCloserWrapper{
|
|
||||||
Reader: r,
|
|
||||||
closer: closer,
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
type readerErrWrapper struct {
|
|
||||||
reader io.Reader
|
|
||||||
closer func()
|
|
||||||
}
|
|
||||||
|
|
||||||
func (r *readerErrWrapper) Read(p []byte) (int, error) {
|
|
||||||
n, err := r.reader.Read(p)
|
|
||||||
if err != nil {
|
|
		r.closer()
	}
	return n, err
}

// NewReaderErrWrapper returns a new io.Reader.
func NewReaderErrWrapper(r io.Reader, closer func()) io.Reader {
	return &readerErrWrapper{
		reader: r,
		closer: closer,
	}
}

// HashData returns the sha256 sum of src.
func HashData(src io.Reader) (string, error) {
	h := sha256.New()
	if _, err := io.Copy(h, src); err != nil {
		return "", err
	}
	return "sha256:" + hex.EncodeToString(h.Sum(nil)), nil
}

// OnEOFReader wraps an io.ReadCloser and a function
// the function will run at the end of file or close the file.
type OnEOFReader struct {
	Rc io.ReadCloser
	Fn func()
}

func (r *OnEOFReader) Read(p []byte) (n int, err error) {
	n, err = r.Rc.Read(p)
	if err == io.EOF {
		r.runFunc()
	}
	return
}

// Close closes the file and run the function.
func (r *OnEOFReader) Close() error {
	err := r.Rc.Close()
	r.runFunc()
	return err
}

func (r *OnEOFReader) runFunc() {
	if fn := r.Fn; fn != nil {
		fn()
		r.Fn = nil
	}
}

// cancelReadCloser wraps an io.ReadCloser with a context for cancelling read
// operations.
type cancelReadCloser struct {
	cancel func()
	pR     *io.PipeReader // Stream to read from
	pW     *io.PipeWriter
}

// NewCancelReadCloser creates a wrapper that closes the ReadCloser when the
// context is cancelled. The returned io.ReadCloser must be closed when it is
// no longer needed.
func NewCancelReadCloser(ctx context.Context, in io.ReadCloser) io.ReadCloser {
	pR, pW := io.Pipe()

	// Create a context used to signal when the pipe is closed
	doneCtx, cancel := context.WithCancel(context.Background())

	p := &cancelReadCloser{
		cancel: cancel,
		pR:     pR,
		pW:     pW,
	}

	go func() {
		_, err := io.Copy(pW, in)
		select {
		case <-ctx.Done():
			// If the context was closed, p.closeWithError
			// was already called. Calling it again would
			// change the error that Read returns.
		default:
			p.closeWithError(err)
		}
		in.Close()
	}()
	go func() {
		for {
			select {
			case <-ctx.Done():
				p.closeWithError(ctx.Err())
			case <-doneCtx.Done():
				return
			}
		}
	}()

	return p
}

// Read wraps the Read method of the pipe that provides data from the wrapped
// ReadCloser.
func (p *cancelReadCloser) Read(buf []byte) (n int, err error) {
	return p.pR.Read(buf)
}

// closeWithError closes the wrapper and its underlying reader. It will
// cause future calls to Read to return err.
func (p *cancelReadCloser) closeWithError(err error) {
	p.pW.CloseWithError(err)
	p.cancel()
}

// Close closes the wrapper its underlying reader. It will cause
// future calls to Read to return io.EOF.
func (p *cancelReadCloser) Close() error {
	p.closeWithError(io.EOF)
	return nil
}
10
vendor/github.com/docker/docker/pkg/ioutils/temp_unix.go
generated
vendored
@ -1,10 +0,0 @@
// +build !windows

package ioutils

import "io/ioutil"

// TempDir on Unix systems is equivalent to ioutil.TempDir.
func TempDir(dir, prefix string) (string, error) {
	return ioutil.TempDir(dir, prefix)
}
18
vendor/github.com/docker/docker/pkg/ioutils/temp_windows.go
generated
vendored
@ -1,18 +0,0 @@
// +build windows

package ioutils

import (
	"io/ioutil"

	"github.com/docker/docker/pkg/longpath"
)

// TempDir is the equivalent of ioutil.TempDir, except that the result is in Windows longpath format.
func TempDir(dir, prefix string) (string, error) {
	tempDir, err := ioutil.TempDir(dir, prefix)
	if err != nil {
		return "", err
	}
	return longpath.AddPrefix(tempDir), nil
}
92
vendor/github.com/docker/docker/pkg/ioutils/writeflusher.go
generated
vendored
@ -1,92 +0,0 @@
|
|||||||
package ioutils
|
|
||||||
|
|
||||||
import (
|
|
||||||
"io"
|
|
||||||
"sync"
|
|
||||||
)
|
|
||||||
|
|
||||||
// WriteFlusher wraps the Write and Flush operation ensuring that every write
|
|
||||||
// is a flush. In addition, the Close method can be called to intercept
|
|
||||||
// Read/Write calls if the targets lifecycle has already ended.
|
|
||||||
type WriteFlusher struct {
|
|
||||||
w io.Writer
|
|
||||||
flusher flusher
|
|
||||||
flushed chan struct{}
|
|
||||||
flushedOnce sync.Once
|
|
||||||
closed chan struct{}
|
|
||||||
closeLock sync.Mutex
|
|
||||||
}
|
|
||||||
|
|
||||||
type flusher interface {
|
|
||||||
Flush()
|
|
||||||
}
|
|
||||||
|
|
||||||
var errWriteFlusherClosed = io.EOF
|
|
||||||
|
|
||||||
func (wf *WriteFlusher) Write(b []byte) (n int, err error) {
|
|
||||||
select {
|
|
||||||
case <-wf.closed:
|
|
||||||
return 0, errWriteFlusherClosed
|
|
||||||
default:
|
|
||||||
}
|
|
||||||
|
|
||||||
n, err = wf.w.Write(b)
|
|
||||||
wf.Flush() // every write is a flush.
|
|
||||||
return n, err
|
|
||||||
}
|
|
||||||
|
|
||||||
// Flush the stream immediately.
|
|
||||||
func (wf *WriteFlusher) Flush() {
|
|
||||||
select {
|
|
||||||
case <-wf.closed:
|
|
||||||
return
|
|
||||||
default:
|
|
||||||
}
|
|
||||||
|
|
||||||
wf.flushedOnce.Do(func() {
|
|
||||||
close(wf.flushed)
|
|
||||||
})
|
|
||||||
wf.flusher.Flush()
|
|
||||||
}
|
|
||||||
|
|
||||||
// Flushed returns the state of flushed.
|
|
||||||
// If it's flushed, return true, or else it return false.
|
|
||||||
func (wf *WriteFlusher) Flushed() bool {
|
|
||||||
// BUG(stevvooe): Remove this method. Its use is inherently racy. Seems to
|
|
||||||
// be used to detect whether or a response code has been issued or not.
|
|
||||||
// Another hook should be used instead.
|
|
||||||
var flushed bool
|
|
||||||
select {
|
|
||||||
case <-wf.flushed:
|
|
||||||
flushed = true
|
|
||||||
default:
|
|
||||||
}
|
|
||||||
return flushed
|
|
||||||
}
|
|
||||||
|
|
||||||
// Close closes the write flusher, disallowing any further writes to the
|
|
||||||
// target. After the flusher is closed, all calls to write or flush will
|
|
||||||
// result in an error.
|
|
||||||
func (wf *WriteFlusher) Close() error {
|
|
||||||
wf.closeLock.Lock()
|
|
||||||
defer wf.closeLock.Unlock()
|
|
||||||
|
|
||||||
select {
|
|
||||||
case <-wf.closed:
|
|
||||||
return errWriteFlusherClosed
|
|
||||||
default:
|
|
||||||
close(wf.closed)
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// NewWriteFlusher returns a new WriteFlusher.
|
|
||||||
func NewWriteFlusher(w io.Writer) *WriteFlusher {
|
|
||||||
var fl flusher
|
|
||||||
if f, ok := w.(flusher); ok {
|
|
||||||
fl = f
|
|
||||||
} else {
|
|
||||||
fl = &NopFlusher{}
|
|
||||||
}
|
|
||||||
return &WriteFlusher{w: w, flusher: fl, closed: make(chan struct{}), flushed: make(chan struct{})}
|
|
||||||
}
|
|
66
vendor/github.com/docker/docker/pkg/ioutils/writers.go
generated
vendored
@ -1,66 +0,0 @@
|
|||||||
package ioutils
|
|
||||||
|
|
||||||
import "io"
|
|
||||||
|
|
||||||
// NopWriter represents a type which write operation is nop.
|
|
||||||
type NopWriter struct{}
|
|
||||||
|
|
||||||
func (*NopWriter) Write(buf []byte) (int, error) {
|
|
||||||
return len(buf), nil
|
|
||||||
}
|
|
||||||
|
|
||||||
type nopWriteCloser struct {
|
|
||||||
io.Writer
|
|
||||||
}
|
|
||||||
|
|
||||||
func (w *nopWriteCloser) Close() error { return nil }
|
|
||||||
|
|
||||||
// NopWriteCloser returns a nopWriteCloser.
|
|
||||||
func NopWriteCloser(w io.Writer) io.WriteCloser {
|
|
||||||
return &nopWriteCloser{w}
|
|
||||||
}
|
|
||||||
|
|
||||||
// NopFlusher represents a type which flush operation is nop.
|
|
||||||
type NopFlusher struct{}
|
|
||||||
|
|
||||||
// Flush is a nop operation.
|
|
||||||
func (f *NopFlusher) Flush() {}
|
|
||||||
|
|
||||||
type writeCloserWrapper struct {
|
|
||||||
io.Writer
|
|
||||||
closer func() error
|
|
||||||
}
|
|
||||||
|
|
||||||
func (r *writeCloserWrapper) Close() error {
|
|
||||||
return r.closer()
|
|
||||||
}
|
|
||||||
|
|
||||||
// NewWriteCloserWrapper returns a new io.WriteCloser.
|
|
||||||
func NewWriteCloserWrapper(r io.Writer, closer func() error) io.WriteCloser {
|
|
||||||
return &writeCloserWrapper{
|
|
||||||
Writer: r,
|
|
||||||
closer: closer,
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// WriteCounter wraps a concrete io.Writer and hold a count of the number
|
|
||||||
// of bytes written to the writer during a "session".
|
|
||||||
// This can be convenient when write return is masked
|
|
||||||
// (e.g., json.Encoder.Encode())
|
|
||||||
type WriteCounter struct {
|
|
||||||
Count int64
|
|
||||||
Writer io.Writer
|
|
||||||
}
|
|
||||||
|
|
||||||
// NewWriteCounter returns a new WriteCounter.
|
|
||||||
func NewWriteCounter(w io.Writer) *WriteCounter {
|
|
||||||
return &WriteCounter{
|
|
||||||
Writer: w,
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (wc *WriteCounter) Write(p []byte) (count int, err error) {
|
|
||||||
count, err = wc.Writer.Write(p)
|
|
||||||
wc.Count += int64(count)
|
|
||||||
return
|
|
||||||
}
|
|
1
vendor/github.com/docker/docker/pkg/signal/README.md
generated
vendored
@ -1 +0,0 @@
This package provides helper functions for dealing with signals across various operating systems
54
vendor/github.com/docker/docker/pkg/signal/signal.go
generated
vendored
@ -1,54 +0,0 @@
|
|||||||
// Package signal provides helper functions for dealing with signals across
|
|
||||||
// various operating systems.
|
|
||||||
package signal
|
|
||||||
|
|
||||||
import (
|
|
||||||
"fmt"
|
|
||||||
"os"
|
|
||||||
"os/signal"
|
|
||||||
"strconv"
|
|
||||||
"strings"
|
|
||||||
"syscall"
|
|
||||||
)
|
|
||||||
|
|
||||||
// CatchAll catches all signals and relays them to the specified channel.
|
|
||||||
func CatchAll(sigc chan os.Signal) {
|
|
||||||
handledSigs := []os.Signal{}
|
|
||||||
for _, s := range SignalMap {
|
|
||||||
handledSigs = append(handledSigs, s)
|
|
||||||
}
|
|
||||||
signal.Notify(sigc, handledSigs...)
|
|
||||||
}
|
|
||||||
|
|
||||||
// StopCatch stops catching the signals and closes the specified channel.
|
|
||||||
func StopCatch(sigc chan os.Signal) {
|
|
||||||
signal.Stop(sigc)
|
|
||||||
close(sigc)
|
|
||||||
}
|
|
||||||
|
|
||||||
// ParseSignal translates a string to a valid syscall signal.
|
|
||||||
// It returns an error if the signal map doesn't include the given signal.
|
|
||||||
func ParseSignal(rawSignal string) (syscall.Signal, error) {
|
|
||||||
s, err := strconv.Atoi(rawSignal)
|
|
||||||
if err == nil {
|
|
||||||
if s == 0 {
|
|
||||||
return -1, fmt.Errorf("Invalid signal: %s", rawSignal)
|
|
||||||
}
|
|
||||||
return syscall.Signal(s), nil
|
|
||||||
}
|
|
||||||
signal, ok := SignalMap[strings.TrimPrefix(strings.ToUpper(rawSignal), "SIG")]
|
|
||||||
if !ok {
|
|
||||||
return -1, fmt.Errorf("Invalid signal: %s", rawSignal)
|
|
||||||
}
|
|
||||||
return signal, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// ValidSignalForPlatform returns true if a signal is valid on the platform
|
|
||||||
func ValidSignalForPlatform(sig syscall.Signal) bool {
|
|
||||||
for _, v := range SignalMap {
|
|
||||||
if v == sig {
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return false
|
|
||||||
}
|
|
41
vendor/github.com/docker/docker/pkg/signal/signal_darwin.go
generated
vendored
@ -1,41 +0,0 @@
|
|||||||
package signal
|
|
||||||
|
|
||||||
import (
|
|
||||||
"syscall"
|
|
||||||
)
|
|
||||||
|
|
||||||
// SignalMap is a map of Darwin signals.
|
|
||||||
var SignalMap = map[string]syscall.Signal{
|
|
||||||
"ABRT": syscall.SIGABRT,
|
|
||||||
"ALRM": syscall.SIGALRM,
|
|
||||||
"BUG": syscall.SIGBUS,
|
|
||||||
"CHLD": syscall.SIGCHLD,
|
|
||||||
"CONT": syscall.SIGCONT,
|
|
||||||
"EMT": syscall.SIGEMT,
|
|
||||||
"FPE": syscall.SIGFPE,
|
|
||||||
"HUP": syscall.SIGHUP,
|
|
||||||
"ILL": syscall.SIGILL,
|
|
||||||
"INFO": syscall.SIGINFO,
|
|
||||||
"INT": syscall.SIGINT,
|
|
||||||
"IO": syscall.SIGIO,
|
|
||||||
"IOT": syscall.SIGIOT,
|
|
||||||
"KILL": syscall.SIGKILL,
|
|
||||||
"PIPE": syscall.SIGPIPE,
|
|
||||||
"PROF": syscall.SIGPROF,
|
|
||||||
"QUIT": syscall.SIGQUIT,
|
|
||||||
"SEGV": syscall.SIGSEGV,
|
|
||||||
"STOP": syscall.SIGSTOP,
|
|
||||||
"SYS": syscall.SIGSYS,
|
|
||||||
"TERM": syscall.SIGTERM,
|
|
||||||
"TRAP": syscall.SIGTRAP,
|
|
||||||
"TSTP": syscall.SIGTSTP,
|
|
||||||
"TTIN": syscall.SIGTTIN,
|
|
||||||
"TTOU": syscall.SIGTTOU,
|
|
||||||
"URG": syscall.SIGURG,
|
|
||||||
"USR1": syscall.SIGUSR1,
|
|
||||||
"USR2": syscall.SIGUSR2,
|
|
||||||
"VTALRM": syscall.SIGVTALRM,
|
|
||||||
"WINCH": syscall.SIGWINCH,
|
|
||||||
"XCPU": syscall.SIGXCPU,
|
|
||||||
"XFSZ": syscall.SIGXFSZ,
|
|
||||||
}
|
|
43
vendor/github.com/docker/docker/pkg/signal/signal_freebsd.go
generated
vendored
@ -1,43 +0,0 @@
|
|||||||
package signal
|
|
||||||
|
|
||||||
import (
|
|
||||||
"syscall"
|
|
||||||
)
|
|
||||||
|
|
||||||
// SignalMap is a map of FreeBSD signals.
|
|
||||||
var SignalMap = map[string]syscall.Signal{
|
|
||||||
"ABRT": syscall.SIGABRT,
|
|
||||||
"ALRM": syscall.SIGALRM,
|
|
||||||
"BUF": syscall.SIGBUS,
|
|
||||||
"CHLD": syscall.SIGCHLD,
|
|
||||||
"CONT": syscall.SIGCONT,
|
|
||||||
"EMT": syscall.SIGEMT,
|
|
||||||
"FPE": syscall.SIGFPE,
|
|
||||||
"HUP": syscall.SIGHUP,
|
|
||||||
"ILL": syscall.SIGILL,
|
|
||||||
"INFO": syscall.SIGINFO,
|
|
||||||
"INT": syscall.SIGINT,
|
|
||||||
"IO": syscall.SIGIO,
|
|
||||||
"IOT": syscall.SIGIOT,
|
|
||||||
"KILL": syscall.SIGKILL,
|
|
||||||
"LWP": syscall.SIGLWP,
|
|
||||||
"PIPE": syscall.SIGPIPE,
|
|
||||||
"PROF": syscall.SIGPROF,
|
|
||||||
"QUIT": syscall.SIGQUIT,
|
|
||||||
"SEGV": syscall.SIGSEGV,
|
|
||||||
"STOP": syscall.SIGSTOP,
|
|
||||||
"SYS": syscall.SIGSYS,
|
|
||||||
"TERM": syscall.SIGTERM,
|
|
||||||
"THR": syscall.SIGTHR,
|
|
||||||
"TRAP": syscall.SIGTRAP,
|
|
||||||
"TSTP": syscall.SIGTSTP,
|
|
||||||
"TTIN": syscall.SIGTTIN,
|
|
||||||
"TTOU": syscall.SIGTTOU,
|
|
||||||
"URG": syscall.SIGURG,
|
|
||||||
"USR1": syscall.SIGUSR1,
|
|
||||||
"USR2": syscall.SIGUSR2,
|
|
||||||
"VTALRM": syscall.SIGVTALRM,
|
|
||||||
"WINCH": syscall.SIGWINCH,
|
|
||||||
"XCPU": syscall.SIGXCPU,
|
|
||||||
"XFSZ": syscall.SIGXFSZ,
|
|
||||||
}
|
|
81
vendor/github.com/docker/docker/pkg/signal/signal_linux.go
generated
vendored
@ -1,81 +0,0 @@
|
|||||||
package signal
|
|
||||||
|
|
||||||
import (
|
|
||||||
"syscall"
|
|
||||||
|
|
||||||
"golang.org/x/sys/unix"
|
|
||||||
)
|
|
||||||
|
|
||||||
const (
|
|
||||||
sigrtmin = 34
|
|
||||||
sigrtmax = 64
|
|
||||||
)
|
|
||||||
|
|
||||||
// SignalMap is a map of Linux signals.
|
|
||||||
var SignalMap = map[string]syscall.Signal{
|
|
||||||
"ABRT": unix.SIGABRT,
|
|
||||||
"ALRM": unix.SIGALRM,
|
|
||||||
"BUS": unix.SIGBUS,
|
|
||||||
"CHLD": unix.SIGCHLD,
|
|
||||||
"CLD": unix.SIGCLD,
|
|
||||||
"CONT": unix.SIGCONT,
|
|
||||||
"FPE": unix.SIGFPE,
|
|
||||||
"HUP": unix.SIGHUP,
|
|
||||||
"ILL": unix.SIGILL,
|
|
||||||
"INT": unix.SIGINT,
|
|
||||||
"IO": unix.SIGIO,
|
|
||||||
"IOT": unix.SIGIOT,
|
|
||||||
"KILL": unix.SIGKILL,
|
|
||||||
"PIPE": unix.SIGPIPE,
|
|
||||||
"POLL": unix.SIGPOLL,
|
|
||||||
"PROF": unix.SIGPROF,
|
|
||||||
"PWR": unix.SIGPWR,
|
|
||||||
"QUIT": unix.SIGQUIT,
|
|
||||||
"SEGV": unix.SIGSEGV,
|
|
||||||
"STKFLT": unix.SIGSTKFLT,
|
|
||||||
"STOP": unix.SIGSTOP,
|
|
||||||
"SYS": unix.SIGSYS,
|
|
||||||
"TERM": unix.SIGTERM,
|
|
||||||
"TRAP": unix.SIGTRAP,
|
|
||||||
"TSTP": unix.SIGTSTP,
|
|
||||||
"TTIN": unix.SIGTTIN,
|
|
||||||
"TTOU": unix.SIGTTOU,
|
|
||||||
"URG": unix.SIGURG,
|
|
||||||
"USR1": unix.SIGUSR1,
|
|
||||||
"USR2": unix.SIGUSR2,
|
|
||||||
"VTALRM": unix.SIGVTALRM,
|
|
||||||
"WINCH": unix.SIGWINCH,
|
|
||||||
"XCPU": unix.SIGXCPU,
|
|
||||||
"XFSZ": unix.SIGXFSZ,
|
|
||||||
"RTMIN": sigrtmin,
|
|
||||||
"RTMIN+1": sigrtmin + 1,
|
|
||||||
"RTMIN+2": sigrtmin + 2,
|
|
||||||
"RTMIN+3": sigrtmin + 3,
|
|
||||||
"RTMIN+4": sigrtmin + 4,
|
|
||||||
"RTMIN+5": sigrtmin + 5,
|
|
||||||
"RTMIN+6": sigrtmin + 6,
|
|
||||||
"RTMIN+7": sigrtmin + 7,
|
|
||||||
"RTMIN+8": sigrtmin + 8,
|
|
||||||
"RTMIN+9": sigrtmin + 9,
|
|
||||||
"RTMIN+10": sigrtmin + 10,
|
|
||||||
"RTMIN+11": sigrtmin + 11,
|
|
||||||
"RTMIN+12": sigrtmin + 12,
|
|
||||||
"RTMIN+13": sigrtmin + 13,
|
|
||||||
"RTMIN+14": sigrtmin + 14,
|
|
||||||
"RTMIN+15": sigrtmin + 15,
|
|
||||||
"RTMAX-14": sigrtmax - 14,
|
|
||||||
"RTMAX-13": sigrtmax - 13,
|
|
||||||
"RTMAX-12": sigrtmax - 12,
|
|
||||||
"RTMAX-11": sigrtmax - 11,
|
|
||||||
"RTMAX-10": sigrtmax - 10,
|
|
||||||
"RTMAX-9": sigrtmax - 9,
|
|
||||||
"RTMAX-8": sigrtmax - 8,
|
|
||||||
"RTMAX-7": sigrtmax - 7,
|
|
||||||
"RTMAX-6": sigrtmax - 6,
|
|
||||||
"RTMAX-5": sigrtmax - 5,
|
|
||||||
"RTMAX-4": sigrtmax - 4,
|
|
||||||
"RTMAX-3": sigrtmax - 3,
|
|
||||||
"RTMAX-2": sigrtmax - 2,
|
|
||||||
"RTMAX-1": sigrtmax - 1,
|
|
||||||
"RTMAX": sigrtmax,
|
|
||||||
}
|
|
42
vendor/github.com/docker/docker/pkg/signal/signal_solaris.go
generated
vendored
@ -1,42 +0,0 @@
|
|||||||
package signal
|
|
||||||
|
|
||||||
import (
|
|
||||||
"syscall"
|
|
||||||
)
|
|
||||||
|
|
||||||
// SignalMap is a map of Solaris signals.
|
|
||||||
// SIGINFO and SIGTHR not defined for Solaris
|
|
||||||
var SignalMap = map[string]syscall.Signal{
|
|
||||||
"ABRT": syscall.SIGABRT,
|
|
||||||
"ALRM": syscall.SIGALRM,
|
|
||||||
"BUF": syscall.SIGBUS,
|
|
||||||
"CHLD": syscall.SIGCHLD,
|
|
||||||
"CONT": syscall.SIGCONT,
|
|
||||||
"EMT": syscall.SIGEMT,
|
|
||||||
"FPE": syscall.SIGFPE,
|
|
||||||
"HUP": syscall.SIGHUP,
|
|
||||||
"ILL": syscall.SIGILL,
|
|
||||||
"INT": syscall.SIGINT,
|
|
||||||
"IO": syscall.SIGIO,
|
|
||||||
"IOT": syscall.SIGIOT,
|
|
||||||
"KILL": syscall.SIGKILL,
|
|
||||||
"LWP": syscall.SIGLWP,
|
|
||||||
"PIPE": syscall.SIGPIPE,
|
|
||||||
"PROF": syscall.SIGPROF,
|
|
||||||
"QUIT": syscall.SIGQUIT,
|
|
||||||
"SEGV": syscall.SIGSEGV,
|
|
||||||
"STOP": syscall.SIGSTOP,
|
|
||||||
"SYS": syscall.SIGSYS,
|
|
||||||
"TERM": syscall.SIGTERM,
|
|
||||||
"TRAP": syscall.SIGTRAP,
|
|
||||||
"TSTP": syscall.SIGTSTP,
|
|
||||||
"TTIN": syscall.SIGTTIN,
|
|
||||||
"TTOU": syscall.SIGTTOU,
|
|
||||||
"URG": syscall.SIGURG,
|
|
||||||
"USR1": syscall.SIGUSR1,
|
|
||||||
"USR2": syscall.SIGUSR2,
|
|
||||||
"VTALRM": syscall.SIGVTALRM,
|
|
||||||
"WINCH": syscall.SIGWINCH,
|
|
||||||
"XCPU": syscall.SIGXCPU,
|
|
||||||
"XFSZ": syscall.SIGXFSZ,
|
|
||||||
}
|
|
21
vendor/github.com/docker/docker/pkg/signal/signal_unix.go
generated
vendored
@ -1,21 +0,0 @@
// +build !windows

package signal

import (
	"syscall"
)

// Signals used in cli/command (no windows equivalent, use
// invalid signals so they don't get handled)

const (
	// SIGCHLD is a signal sent to a process when a child process terminates, is interrupted, or resumes after being interrupted.
	SIGCHLD = syscall.SIGCHLD
	// SIGWINCH is a signal sent to a process when its controlling terminal changes its size
	SIGWINCH = syscall.SIGWINCH
	// SIGPIPE is a signal sent to a process when a pipe is written to before the other end is open for reading
	SIGPIPE = syscall.SIGPIPE
	// DefaultStopSignal is the syscall signal used to stop a container in unix systems.
	DefaultStopSignal = "SIGTERM"
)
10
vendor/github.com/docker/docker/pkg/signal/signal_unsupported.go
generated
vendored
@ -1,10 +0,0 @@
// +build !linux,!darwin,!freebsd,!windows,!solaris

package signal

import (
	"syscall"
)

// SignalMap is an empty map of signals for unsupported platform.
var SignalMap = map[string]syscall.Signal{}
28
vendor/github.com/docker/docker/pkg/signal/signal_windows.go
generated
vendored
@ -1,28 +0,0 @@
// +build windows

package signal

import (
	"syscall"
)

// Signals used in cli/command (no windows equivalent, use
// invalid signals so they don't get handled)
const (
	SIGCHLD  = syscall.Signal(0xff)
	SIGWINCH = syscall.Signal(0xff)
	SIGPIPE  = syscall.Signal(0xff)
	// DefaultStopSignal is the syscall signal used to stop a container in windows systems.
	DefaultStopSignal = "15"
)

// SignalMap is a map of "supported" signals. As per the comment in GOLang's
// ztypes_windows.go: "More invented values for signals". Windows doesn't
// really support signals in any way, shape or form that Unix does.
//
// We have these so that docker kill can be used to gracefully (TERM) and
// forcibly (KILL) terminate a container on Windows.
var SignalMap = map[string]syscall.Signal{
	"KILL": syscall.SIGKILL,
	"TERM": syscall.SIGTERM,
}
104
vendor/github.com/docker/docker/pkg/signal/trap.go
generated
vendored
@ -1,104 +0,0 @@
|
|||||||
package signal
|
|
||||||
|
|
||||||
import (
|
|
||||||
"fmt"
|
|
||||||
"os"
|
|
||||||
gosignal "os/signal"
|
|
||||||
"path/filepath"
|
|
||||||
"runtime"
|
|
||||||
"strings"
|
|
||||||
"sync/atomic"
|
|
||||||
"syscall"
|
|
||||||
"time"
|
|
||||||
|
|
||||||
"github.com/pkg/errors"
|
|
||||||
)
|
|
||||||
|
|
||||||
// Trap sets up a simplified signal "trap", appropriate for common
|
|
||||||
// behavior expected from a vanilla unix command-line tool in general
|
|
||||||
// (and the Docker engine in particular).
|
|
||||||
//
|
|
||||||
// * If SIGINT or SIGTERM are received, `cleanup` is called, then the process is terminated.
|
|
||||||
// * If SIGINT or SIGTERM are received 3 times before cleanup is complete, then cleanup is
|
|
||||||
// skipped and the process is terminated immediately (allows force quit of stuck daemon)
|
|
||||||
// * A SIGQUIT always causes an exit without cleanup, with a goroutine dump preceding exit.
|
|
||||||
// * Ignore SIGPIPE events. These are generated by systemd when journald is restarted while
|
|
||||||
// the docker daemon is not restarted and also running under systemd.
|
|
||||||
// Fixes https://github.com/docker/docker/issues/19728
|
|
||||||
//
|
|
||||||
func Trap(cleanup func(), logger interface {
|
|
||||||
Info(args ...interface{})
|
|
||||||
}) {
|
|
||||||
c := make(chan os.Signal, 1)
|
|
||||||
// we will handle INT, TERM, QUIT, SIGPIPE here
|
|
||||||
signals := []os.Signal{os.Interrupt, syscall.SIGTERM, syscall.SIGQUIT, syscall.SIGPIPE}
|
|
||||||
gosignal.Notify(c, signals...)
|
|
||||||
go func() {
|
|
||||||
interruptCount := uint32(0)
|
|
||||||
for sig := range c {
|
|
||||||
if sig == syscall.SIGPIPE {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
|
|
||||||
go func(sig os.Signal) {
|
|
||||||
logger.Info(fmt.Sprintf("Processing signal '%v'", sig))
|
|
||||||
switch sig {
|
|
||||||
case os.Interrupt, syscall.SIGTERM:
|
|
||||||
if atomic.LoadUint32(&interruptCount) < 3 {
|
|
||||||
// Initiate the cleanup only once
|
|
||||||
if atomic.AddUint32(&interruptCount, 1) == 1 {
|
|
||||||
// Call the provided cleanup handler
|
|
||||||
cleanup()
|
|
||||||
os.Exit(0)
|
|
||||||
} else {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
} else {
|
|
||||||
// 3 SIGTERM/INT signals received; force exit without cleanup
|
|
||||||
logger.Info("Forcing docker daemon shutdown without cleanup; 3 interrupts received")
|
|
||||||
}
|
|
||||||
case syscall.SIGQUIT:
|
|
||||||
DumpStacks("")
|
|
||||||
logger.Info("Forcing docker daemon shutdown without cleanup on SIGQUIT")
|
|
||||||
}
|
|
||||||
//for the SIGINT/TERM, and SIGQUIT non-clean shutdown case, exit with 128 + signal #
|
|
||||||
os.Exit(128 + int(sig.(syscall.Signal)))
|
|
||||||
}(sig)
|
|
||||||
}
|
|
||||||
}()
|
|
||||||
}
|
|
||||||
|
|
||||||
const stacksLogNameTemplate = "goroutine-stacks-%s.log"
|
|
||||||
|
|
||||||
// DumpStacks appends the runtime stack into file in dir and returns full path
|
|
||||||
// to that file.
|
|
||||||
func DumpStacks(dir string) (string, error) {
|
|
||||||
var (
|
|
||||||
buf []byte
|
|
||||||
stackSize int
|
|
||||||
)
|
|
||||||
bufferLen := 16384
|
|
||||||
for stackSize == len(buf) {
|
|
||||||
buf = make([]byte, bufferLen)
|
|
||||||
stackSize = runtime.Stack(buf, true)
|
|
||||||
bufferLen *= 2
|
|
||||||
}
|
|
||||||
buf = buf[:stackSize]
|
|
||||||
var f *os.File
|
|
||||||
if dir != "" {
|
|
||||||
path := filepath.Join(dir, fmt.Sprintf(stacksLogNameTemplate, strings.Replace(time.Now().Format(time.RFC3339), ":", "", -1)))
|
|
||||||
var err error
|
|
||||||
f, err = os.OpenFile(path, os.O_CREATE|os.O_WRONLY, 0666)
|
|
||||||
if err != nil {
|
|
||||||
return "", errors.Wrap(err, "failed to open file to write the goroutine stacks")
|
|
||||||
}
|
|
||||||
defer f.Close()
|
|
||||||
defer f.Sync()
|
|
||||||
} else {
|
|
||||||
f = os.Stderr
|
|
||||||
}
|
|
||||||
if _, err := f.Write(buf); err != nil {
|
|
||||||
return "", errors.Wrap(err, "failed to write goroutine stacks")
|
|
||||||
}
|
|
||||||
return f.Name(), nil
|
|
||||||
}
|
|
191
vendor/github.com/opencontainers/runtime-tools/LICENSE
generated
vendored
@ -1,191 +0,0 @@
|
|||||||
|
|
||||||
Apache License
|
|
||||||
Version 2.0, January 2004
|
|
||||||
http://www.apache.org/licenses/
|
|
||||||
|
|
||||||
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
|
|
||||||
|
|
||||||
1. Definitions.
|
|
||||||
|
|
||||||
"License" shall mean the terms and conditions for use, reproduction,
|
|
||||||
and distribution as defined by Sections 1 through 9 of this document.
|
|
||||||
|
|
||||||
"Licensor" shall mean the copyright owner or entity authorized by
|
|
||||||
the copyright owner that is granting the License.
|
|
||||||
|
|
||||||
"Legal Entity" shall mean the union of the acting entity and all
|
|
||||||
other entities that control, are controlled by, or are under common
|
|
||||||
control with that entity. For the purposes of this definition,
|
|
||||||
"control" means (i) the power, direct or indirect, to cause the
|
|
||||||
direction or management of such entity, whether by contract or
|
|
||||||
otherwise, or (ii) ownership of fifty percent (50%) or more of the
|
|
||||||
outstanding shares, or (iii) beneficial ownership of such entity.
|
|
||||||
|
|
||||||
"You" (or "Your") shall mean an individual or Legal Entity
|
|
||||||
exercising permissions granted by this License.
|
|
||||||
|
|
||||||
"Source" form shall mean the preferred form for making modifications,
|
|
||||||
including but not limited to software source code, documentation
|
|
||||||
source, and configuration files.
|
|
||||||
|
|
||||||
"Object" form shall mean any form resulting from mechanical
|
|
||||||
transformation or translation of a Source form, including but
|
|
||||||
not limited to compiled object code, generated documentation,
|
|
||||||
and conversions to other media types.
|
|
||||||
|
|
||||||
"Work" shall mean the work of authorship, whether in Source or
|
|
||||||
Object form, made available under the License, as indicated by a
|
|
||||||
copyright notice that is included in or attached to the work
|
|
||||||
(an example is provided in the Appendix below).
|
|
||||||
|
|
||||||
"Derivative Works" shall mean any work, whether in Source or Object
|
|
||||||
form, that is based on (or derived from) the Work and for which the
|
|
||||||
editorial revisions, annotations, elaborations, or other modifications
|
|
||||||
represent, as a whole, an original work of authorship. For the purposes
|
|
||||||
of this License, Derivative Works shall not include works that remain
|
|
||||||
separable from, or merely link (or bind by name) to the interfaces of,
|
|
||||||
the Work and Derivative Works thereof.
|
|
||||||
|
|
||||||
"Contribution" shall mean any work of authorship, including
|
|
||||||
the original version of the Work and any modifications or additions
|
|
||||||
to that Work or Derivative Works thereof, that is intentionally
|
|
||||||
submitted to Licensor for inclusion in the Work by the copyright owner
|
|
||||||
or by an individual or Legal Entity authorized to submit on behalf of
|
|
||||||
the copyright owner. For the purposes of this definition, "submitted"
|
|
||||||
means any form of electronic, verbal, or written communication sent
|
|
||||||
to the Licensor or its representatives, including but not limited to
|
|
||||||
communication on electronic mailing lists, source code control systems,
|
|
||||||
and issue tracking systems that are managed by, or on behalf of, the
|
|
||||||
Licensor for the purpose of discussing and improving the Work, but
|
|
||||||
excluding communication that is conspicuously marked or otherwise
|
|
||||||
designated in writing by the copyright owner as "Not a Contribution."
|
|
||||||
|
|
||||||
"Contributor" shall mean Licensor and any individual or Legal Entity
|
|
||||||
on behalf of whom a Contribution has been received by Licensor and
|
|
||||||
subsequently incorporated within the Work.
|
|
||||||
|
|
||||||
2. Grant of Copyright License. Subject to the terms and conditions of
|
|
||||||
this License, each Contributor hereby grants to You a perpetual,
|
|
||||||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
|
||||||
copyright license to reproduce, prepare Derivative Works of,
|
|
||||||
publicly display, publicly perform, sublicense, and distribute the
|
|
||||||
Work and such Derivative Works in Source or Object form.
|
|
||||||
|
|
||||||
3. Grant of Patent License. Subject to the terms and conditions of
|
|
||||||
this License, each Contributor hereby grants to You a perpetual,
|
|
||||||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
|
||||||
(except as stated in this section) patent license to make, have made,
|
|
||||||
use, offer to sell, sell, import, and otherwise transfer the Work,
|
|
||||||
where such license applies only to those patent claims licensable
|
|
||||||
by such Contributor that are necessarily infringed by their
|
|
||||||
Contribution(s) alone or by combination of their Contribution(s)
|
|
||||||
with the Work to which such Contribution(s) was submitted. If You
|
|
||||||
institute patent litigation against any entity (including a
|
|
||||||
cross-claim or counterclaim in a lawsuit) alleging that the Work
|
|
||||||
or a Contribution incorporated within the Work constitutes direct
|
|
||||||
or contributory patent infringement, then any patent licenses
|
|
||||||
granted to You under this License for that Work shall terminate
|
|
||||||
as of the date such litigation is filed.
|
|
||||||
|
|
||||||
4. Redistribution. You may reproduce and distribute copies of the
|
|
||||||
Work or Derivative Works thereof in any medium, with or without
|
|
||||||
modifications, and in Source or Object form, provided that You
|
|
||||||
meet the following conditions:
|
|
||||||
|
|
||||||
(a) You must give any other recipients of the Work or
|
|
||||||
Derivative Works a copy of this License; and
|
|
||||||
|
|
||||||
(b) You must cause any modified files to carry prominent notices
|
|
||||||
stating that You changed the files; and
|
|
||||||
|
|
||||||
(c) You must retain, in the Source form of any Derivative Works
|
|
||||||
that You distribute, all copyright, patent, trademark, and
|
|
||||||
attribution notices from the Source form of the Work,
|
|
||||||
excluding those notices that do not pertain to any part of
|
|
||||||
the Derivative Works; and
|
|
||||||
|
|
||||||
(d) If the Work includes a "NOTICE" text file as part of its
|
|
||||||
distribution, then any Derivative Works that You distribute must
|
|
||||||
include a readable copy of the attribution notices contained
|
|
||||||
within such NOTICE file, excluding those notices that do not
|
|
||||||
pertain to any part of the Derivative Works, in at least one
|
|
||||||
of the following places: within a NOTICE text file distributed
|
|
||||||
as part of the Derivative Works; within the Source form or
|
|
||||||
documentation, if provided along with the Derivative Works; or,
|
|
||||||
within a display generated by the Derivative Works, if and
|
|
||||||
wherever such third-party notices normally appear. The contents
|
|
||||||
of the NOTICE file are for informational purposes only and
|
|
||||||
do not modify the License. You may add Your own attribution
|
|
||||||
notices within Derivative Works that You distribute, alongside
|
|
||||||
or as an addendum to the NOTICE text from the Work, provided
|
|
||||||
that such additional attribution notices cannot be construed
|
|
||||||
as modifying the License.
|
|
||||||
|
|
||||||
You may add Your own copyright statement to Your modifications and
|
|
||||||
may provide additional or different license terms and conditions
|
|
||||||
for use, reproduction, or distribution of Your modifications, or
|
|
||||||
for any such Derivative Works as a whole, provided Your use,
|
|
||||||
reproduction, and distribution of the Work otherwise complies with
|
|
||||||
the conditions stated in this License.
|
|
||||||
|
|
||||||
5. Submission of Contributions. Unless You explicitly state otherwise,
|
|
||||||
any Contribution intentionally submitted for inclusion in the Work
|
|
||||||
by You to the Licensor shall be under the terms and conditions of
|
|
||||||
this License, without any additional terms or conditions.
|
|
||||||
Notwithstanding the above, nothing herein shall supersede or modify
|
|
||||||
the terms of any separate license agreement you may have executed
|
|
||||||
with Licensor regarding such Contributions.
|
|
||||||
|
|
||||||
6. Trademarks. This License does not grant permission to use the trade
|
|
||||||
names, trademarks, service marks, or product names of the Licensor,
|
|
||||||
except as required for reasonable and customary use in describing the
|
|
||||||
origin of the Work and reproducing the content of the NOTICE file.
|
|
||||||
|
|
||||||
7. Disclaimer of Warranty. Unless required by applicable law or
|
|
||||||
agreed to in writing, Licensor provides the Work (and each
|
|
||||||
Contributor provides its Contributions) on an "AS IS" BASIS,
|
|
||||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
|
||||||
implied, including, without limitation, any warranties or conditions
|
|
||||||
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
|
|
||||||
PARTICULAR PURPOSE. You are solely responsible for determining the
|
|
||||||
appropriateness of using or redistributing the Work and assume any
|
|
||||||
risks associated with Your exercise of permissions under this License.
|
|
||||||
|
|
||||||
8. Limitation of Liability. In no event and under no legal theory,
|
|
||||||
whether in tort (including negligence), contract, or otherwise,
|
|
||||||
unless required by applicable law (such as deliberate and grossly
|
|
||||||
negligent acts) or agreed to in writing, shall any Contributor be
|
|
||||||
liable to You for damages, including any direct, indirect, special,
|
|
||||||
incidental, or consequential damages of any character arising as a
|
|
||||||
result of this License or out of the use or inability to use the
|
|
||||||
Work (including but not limited to damages for loss of goodwill,
|
|
||||||
work stoppage, computer failure or malfunction, or any and all
|
|
||||||
other commercial damages or losses), even if such Contributor
|
|
||||||
has been advised of the possibility of such damages.
|
|
||||||
|
|
||||||
9. Accepting Warranty or Additional Liability. While redistributing
|
|
||||||
the Work or Derivative Works thereof, You may choose to offer,
|
|
||||||
and charge a fee for, acceptance of support, warranty, indemnity,
|
|
||||||
or other liability obligations and/or rights consistent with this
|
|
||||||
License. However, in accepting such obligations, You may act only
|
|
||||||
on Your own behalf and on Your sole responsibility, not on behalf
|
|
||||||
of any other Contributor, and only if You agree to indemnify,
|
|
||||||
defend, and hold each Contributor harmless for any liability
|
|
||||||
incurred by, or claims asserted against, such Contributor by reason
|
|
||||||
of your accepting any such warranty or additional liability.
|
|
||||||
|
|
||||||
END OF TERMS AND CONDITIONS
|
|
||||||
|
|
||||||
Copyright 2015 The Linux Foundation.
|
|
||||||
|
|
||||||
Licensed under the Apache License, Version 2.0 (the "License");
|
|
||||||
you may not use this file except in compliance with the License.
|
|
||||||
You may obtain a copy of the License at
|
|
||||||
|
|
||||||
http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
|
|
||||||
Unless required by applicable law or agreed to in writing, software
|
|
||||||
distributed under the License is distributed on an "AS IS" BASIS,
|
|
||||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
||||||
See the License for the specific language governing permissions and
|
|
||||||
limitations under the License.
|
|
128
vendor/github.com/opencontainers/runtime-tools/README.md
generated
vendored
@ -1,128 +0,0 @@
|
|||||||
# oci-runtime-tool [](https://travis-ci.org/opencontainers/runtime-tools) [](https://goreportcard.com/report/github.com/opencontainers/runtime-tools)
|
|
||||||
|
|
||||||
oci-runtime-tool is a collection of tools for working with the [OCI runtime specification][runtime-spec].
|
|
||||||
To build from source code, runtime-tools requires Go 1.7.x or above.
|
|
||||||
|
|
||||||
## Generating an OCI runtime spec configuration files
|
|
||||||
|
|
||||||
[`oci-runtime-tool generate`][generate.1] generates [configuration JSON][config.json] for an [OCI bundle][bundle].
|
|
||||||
[OCI-compatible runtimes][runtime-spec] like [runC][] expect to read the configuration from `config.json`.
|
|
||||||
|
|
||||||
```console
|
|
||||||
$ oci-runtime-tool generate --output config.json
|
|
||||||
$ cat config.json
|
|
||||||
{
|
|
||||||
"ociVersion": "0.5.0",
|
|
||||||
…
|
|
||||||
}
|
|
||||||
```
|
|
||||||
|
|
||||||
## Validating an OCI bundle
|
|
||||||
|
|
||||||
[`oci-runtime-tool validate`][validate.1] validates an OCI bundle.
|
|
||||||
The error message will be printed if the OCI bundle failed the validation procedure.
|
|
||||||
|
|
||||||
```console
|
|
||||||
$ oci-runtime-tool generate
|
|
||||||
$ oci-runtime-tool validate
|
|
||||||
INFO[0000] Bundle validation succeeded.
|
|
||||||
```
|
|
||||||
|
|
||||||
## Testing OCI runtimes
|
|
||||||
|
|
||||||
The runtime validation suite uses [node-tap][], which is packaged for some distributions (for example, it is in [Debian's `node-tap` package][debian-node-tap]).
|
|
||||||
If your distribution does not package node-tap, you can install [npm][] (for example, from [Gentoo's `nodejs` package][gentoo-nodejs]) and use it:
|
|
||||||
|
|
||||||
```console
|
|
||||||
$ npm install tap
|
|
||||||
```
|
|
||||||
|
|
||||||
Build the validation executables:
|
|
||||||
|
|
||||||
```console
|
|
||||||
$ make runtimetest validation-executables
|
|
||||||
```
|
|
||||||
|
|
||||||
Runtime validation currently [only supports](docs/runtime-compliance-testing.md) the [OCI Runtime Command Line Interface](doc/command-line-interface.md).
|
|
||||||
If we add support for alternative APIs in the future, runtime validation will gain an option to select the desired runtime API.
|
|
||||||
For the command line interface, the `RUNTIME` option selects the runtime command (`funC` in the [OCI Runtime Command Line Interface](doc/command-line-interface.md)).
|
|
||||||
|
|
||||||
```
|
|
||||||
$ sudo make RUNTIME=runc localvalidation
|
|
||||||
RUNTIME=runc tap validation/pidfile.t validation/linux_cgroups_hugetlb.t validation/linux_cgroups_memory.t validation/linux_rootfs_propagation_shared.t validation/kill.t validation/create.t validation/poststart.t validation/linux_cgroups_network.t validation/poststop_fail.t validation/linux_readonly_paths.t validation/prestart_fail.t validation/hooks_stdin.t validation/default.t validation/linux_masked_paths.t validation/poststop.t validation/misc_props.t validation/prestart.t validation/poststart_fail.t validation/mounts.t validation/linux_cgroups_relative_pids.t validation/process_user.t validation/process.t validation/hooks.t validation/process_capabilities_fail.t validation/process_rlimits_fail.t validation/linux_cgroups_relative_cpus.t validation/process_rlimits.t validation/linux_cgroups_relative_blkio.t validation/linux_sysctl.t validation/linux_seccomp.t validation/linux_devices.t validation/start.t validation/linux_cgroups_pids.t validation/process_capabilities.t validation/process_oom_score_adj.t validation/linux_cgroups_relative_hugetlb.t validation/linux_cgroups_cpus.t validation/linux_cgroups_relative_memory.t validation/state.t validation/root_readonly_true.t validation/linux_cgroups_blkio.t validation/linux_rootfs_propagation_unbindable.t validation/delete.t validation/linux_cgroups_relative_network.t validation/hostname.t validation/killsig.t validation/linux_uid_mappings.t
|
|
||||||
validation/pidfile.t .failed to create the container
|
|
||||||
container_linux.go:348: starting container process caused "process_linux.go:402: container init caused \"process_linux.go:367: setting cgroup config for procHooks process caused \\\"failed to write 56892210544640 to hugetlb.1GB.limit_in_bytes: open /sys/fs/cgroup/hugetlb/cgrouptest/hugetlb.1GB.limit_in_bytes: permission denied\\\"\""
|
|
||||||
exit status 1
|
|
||||||
validation/pidfile.t .................................. 1/1 315ms
|
|
||||||
validation/linux_cgroups_hugetlb.t .................... 0/1
|
|
||||||
not ok validation/linux_cgroups_hugetlb.t
|
|
||||||
timeout: 30000
|
|
||||||
file: validation/linux_cgroups_hugetlb.t
|
|
||||||
command: validation/linux_cgroups_hugetlb.t
|
|
||||||
args: []
|
|
||||||
stdio:
|
|
||||||
- 0
|
|
||||||
- pipe
|
|
||||||
- 2
|
|
||||||
cwd: /…/go/src/github.com/opencontainers/runtime-tools
|
|
||||||
exitCode: 1
|
|
||||||
|
|
||||||
validation/linux_cgroups_memory.t ..................... 9/9
|
|
||||||
validation/linux_rootfs_propagation_shared.t ...... 252/282
|
|
||||||
not ok shared root propogation exposes "/target348456609/mount892511628/example376408222"
|
|
||||||
|
|
||||||
Skipped: 29
|
|
||||||
/dev/null (default device) has unconfigured permissions
|
|
||||||
…
|
|
||||||
total ........................................... 4381/4962
|
|
||||||
|
|
||||||
|
|
||||||
4381 passing (1m)
|
|
||||||
567 pending
|
|
||||||
14 failing
|
|
||||||
|
|
||||||
make: *** [Makefile:44: localvalidation] Error 1
|
|
||||||
```
|
|
||||||
|
|
||||||
You can also run an individual test executable directly:
|
|
||||||
|
|
||||||
```console
|
|
||||||
$ RUNTIME=runc validation/default.t
|
|
||||||
TAP version 13
|
|
||||||
ok 1 - has expected hostname
|
|
||||||
---
|
|
||||||
{
|
|
||||||
"actual": "mrsdalloway",
|
|
||||||
"expected": "mrsdalloway"
|
|
||||||
}
|
|
||||||
...
|
|
||||||
…
|
|
||||||
ok 287 # SKIP linux.gidMappings not set
|
|
||||||
1..287
|
|
||||||
```
|
|
||||||
|
|
||||||
If you cannot install node-tap, you can probably run the test suite with another [TAP consumer][tap-consumers].
|
|
||||||
For example, with [`prove`][prove]:
|
|
||||||
|
|
||||||
```console
|
|
||||||
$ sudo make TAP='prove -Q -j9' RUNTIME=runc VALIDATION_TESTS=validation/pidfile.t localvalidation
|
|
||||||
RUNTIME=runc prove -Q -j9 validation/pidfile.t
|
|
||||||
All tests successful.
|
|
||||||
Files=1, Tests=1, 0 wallclock secs ( 0.01 usr 0.01 sys + 0.03 cusr 0.03 csys = 0.08 CPU)
|
|
||||||
Result: PASS
|
|
||||||
```
|
|
||||||
|
|
||||||
[bundle]: https://github.com/opencontainers/runtime-spec/blob/master/bundle.md
|
|
||||||
[config.json]: https://github.com/opencontainers/runtime-spec/blob/master/config.md
|
|
||||||
[debian-node-tap]: https://packages.debian.org/stretch/node-tap
|
|
||||||
[debian-nodejs]: https://packages.debian.org/stretch/nodejs
|
|
||||||
[gentoo-nodejs]: https://packages.gentoo.org/packages/net-libs/nodejs
|
|
||||||
[node-tap]: http://www.node-tap.org/
|
|
||||||
[npm]: https://www.npmjs.com/
|
|
||||||
[prove]: http://search.cpan.org/~leont/Test-Harness-3.39/bin/prove
|
|
||||||
[runC]: https://github.com/opencontainers/runc
|
|
||||||
[runtime-spec]: https://github.com/opencontainers/runtime-spec
|
|
||||||
[tap-consumers]: https://testanything.org/consumers.html
|
|
||||||
|
|
||||||
[generate.1]: man/oci-runtime-tool-generate.1.md
|
|
||||||
[validate.1]: man/oci-runtime-tool-validate.1.md
|
|
122
vendor/github.com/opencontainers/runtime-tools/error/error.go
generated
vendored
@ -1,122 +0,0 @@
|
|||||||
// Package error implements generic tooling for tracking RFC 2119
|
|
||||||
// violations and linking back to the appropriate specification section.
|
|
||||||
package error
|
|
||||||
|
|
||||||
import (
|
|
||||||
"fmt"
|
|
||||||
"strings"
|
|
||||||
)
|
|
||||||
|
|
||||||
// Level represents the RFC 2119 compliance levels
|
|
||||||
type Level int
|
|
||||||
|
|
||||||
const (
|
|
||||||
// MAY-level
|
|
||||||
|
|
||||||
// May represents 'MAY' in RFC 2119.
|
|
||||||
May Level = iota
|
|
||||||
// Optional represents 'OPTIONAL' in RFC 2119.
|
|
||||||
Optional
|
|
||||||
|
|
||||||
// SHOULD-level
|
|
||||||
|
|
||||||
// Should represents 'SHOULD' in RFC 2119.
|
|
||||||
Should
|
|
||||||
// ShouldNot represents 'SHOULD NOT' in RFC 2119.
|
|
||||||
ShouldNot
|
|
||||||
// Recommended represents 'RECOMMENDED' in RFC 2119.
|
|
||||||
Recommended
|
|
||||||
// NotRecommended represents 'NOT RECOMMENDED' in RFC 2119.
|
|
||||||
NotRecommended
|
|
||||||
|
|
||||||
// MUST-level
|
|
||||||
|
|
||||||
// Must represents 'MUST' in RFC 2119
|
|
||||||
Must
|
|
||||||
// MustNot represents 'MUST NOT' in RFC 2119.
|
|
||||||
MustNot
|
|
||||||
// Shall represents 'SHALL' in RFC 2119.
|
|
||||||
Shall
|
|
||||||
// ShallNot represents 'SHALL NOT' in RFC 2119.
|
|
||||||
ShallNot
|
|
||||||
// Required represents 'REQUIRED' in RFC 2119.
|
|
||||||
Required
|
|
||||||
)
|
|
||||||
|
|
||||||
// Error represents an error with compliance level and specification reference.
|
|
||||||
type Error struct {
|
|
||||||
// Level represents the RFC 2119 compliance level.
|
|
||||||
Level Level
|
|
||||||
|
|
||||||
// Reference is a URL for the violated specification requirement.
|
|
||||||
Reference string
|
|
||||||
|
|
||||||
// Err holds additional details about the violation.
|
|
||||||
Err error
|
|
||||||
}
|
|
||||||
|
|
||||||
// ParseLevel takes a string level and returns the RFC 2119 compliance level constant.
|
|
||||||
func ParseLevel(level string) (Level, error) {
|
|
||||||
switch strings.ToUpper(level) {
|
|
||||||
case "MAY":
|
|
||||||
fallthrough
|
|
||||||
case "OPTIONAL":
|
|
||||||
return May, nil
|
|
||||||
case "SHOULD":
|
|
||||||
fallthrough
|
|
||||||
case "SHOULDNOT":
|
|
||||||
fallthrough
|
|
||||||
case "RECOMMENDED":
|
|
||||||
fallthrough
|
|
||||||
case "NOTRECOMMENDED":
|
|
||||||
return Should, nil
|
|
||||||
case "MUST":
|
|
||||||
fallthrough
|
|
||||||
case "MUSTNOT":
|
|
||||||
fallthrough
|
|
||||||
case "SHALL":
|
|
||||||
fallthrough
|
|
||||||
case "SHALLNOT":
|
|
||||||
fallthrough
|
|
||||||
case "REQUIRED":
|
|
||||||
return Must, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
var l Level
|
|
||||||
return l, fmt.Errorf("%q is not a valid compliance level", level)
|
|
||||||
}
|
|
||||||
|
|
||||||
// String takes a RFC 2119 compliance level constant and returns a string representation.
|
|
||||||
func (level Level) String() string {
|
|
||||||
switch level {
|
|
||||||
case May:
|
|
||||||
return "MAY"
|
|
||||||
case Optional:
|
|
||||||
return "OPTIONAL"
|
|
||||||
case Should:
|
|
||||||
return "SHOULD"
|
|
||||||
case ShouldNot:
|
|
||||||
return "SHOULD NOT"
|
|
||||||
case Recommended:
|
|
||||||
return "RECOMMENDED"
|
|
||||||
case NotRecommended:
|
|
||||||
return "NOT RECOMMENDED"
|
|
||||||
case Must:
|
|
||||||
return "MUST"
|
|
||||||
case MustNot:
|
|
||||||
return "MUST NOT"
|
|
||||||
case Shall:
|
|
||||||
return "SHALL"
|
|
||||||
case ShallNot:
|
|
||||||
return "SHALL NOT"
|
|
||||||
case Required:
|
|
||||||
return "REQUIRED"
|
|
||||||
}
|
|
||||||
|
|
||||||
panic(fmt.Sprintf("%d is not a valid compliance level", level))
|
|
||||||
}
|
|
||||||
|
|
||||||
// Error returns the error message with specification reference.
|
|
||||||
func (err *Error) Error() string {
|
|
||||||
return fmt.Sprintf("%s\nRefer to: %s", err.Err.Error(), err.Reference)
|
|
||||||
}
|
|
48
vendor/github.com/opencontainers/runtime-tools/filepath/abs.go
generated
vendored
@ -1,48 +0,0 @@
|
|||||||
package filepath
|
|
||||||
|
|
||||||
import (
|
|
||||||
"regexp"
|
|
||||||
"strings"
|
|
||||||
)
|
|
||||||
|
|
||||||
var windowsAbs = regexp.MustCompile(`^[a-zA-Z]:\\.*$`)
|
|
||||||
|
|
||||||
// Abs is a version of path/filepath's Abs with an explicit operating
|
|
||||||
// system and current working directory.
|
|
||||||
func Abs(os, path, cwd string) (_ string, err error) {
|
|
||||||
if IsAbs(os, path) {
|
|
||||||
return Clean(os, path), nil
|
|
||||||
}
|
|
||||||
return Clean(os, Join(os, cwd, path)), nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// IsAbs is a version of path/filepath's IsAbs with an explicit
|
|
||||||
// operating system.
|
|
||||||
func IsAbs(os, path string) bool {
|
|
||||||
if os == "windows" {
|
|
||||||
// FIXME: copy hideous logic from Go's
|
|
||||||
// src/path/filepath/path_windows.go into somewhere where we can
|
|
||||||
// put 3-clause BSD licensed code.
|
|
||||||
return windowsAbs.MatchString(path)
|
|
||||||
}
|
|
||||||
sep := Separator(os)
|
|
||||||
|
|
||||||
// POSIX has [1]:
|
|
||||||
//
|
|
||||||
// > If a pathname begins with two successive <slash> characters,
|
|
||||||
// > the first component following the leading <slash> characters
|
|
||||||
// > may be interpreted in an implementation-defined manner,
|
|
||||||
// > although more than two leading <slash> characters shall be
|
|
||||||
// > treated as a single <slash> character.
|
|
||||||
//
|
|
||||||
// And Boost treats // as non-absolute [2], but Linux [3,4], Python
|
|
||||||
// [5] and Go [6] all treat // as absolute.
|
|
||||||
//
|
|
||||||
// [1]: http://pubs.opengroup.org/onlinepubs/9699919799/basedefs/V1_chap04.html#tag_04_13
|
|
||||||
// [2]: https://github.com/boostorg/filesystem/blob/boost-1.64.0/test/path_test.cpp#L861
|
|
||||||
// [3]: http://man7.org/linux/man-pages/man7/path_resolution.7.html
|
|
||||||
// [4]: https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/tree/Documentation/filesystems/path-lookup.md?h=v4.12#n41
|
|
||||||
// [5]: https://github.com/python/cpython/blob/v3.6.1/Lib/posixpath.py#L64-L66
|
|
||||||
// [6]: https://go.googlesource.com/go/+/go1.8.3/src/path/path.go#199
|
|
||||||
return strings.HasPrefix(path, string(sep))
|
|
||||||
}
|
|
32
vendor/github.com/opencontainers/runtime-tools/filepath/ancestor.go
generated
vendored
@ -1,32 +0,0 @@
|
|||||||
package filepath
|
|
||||||
|
|
||||||
import (
|
|
||||||
"fmt"
|
|
||||||
"strings"
|
|
||||||
)
|
|
||||||
|
|
||||||
// IsAncestor returns true when pathB is an strict ancestor of pathA,
|
|
||||||
// and false where the paths are equal or pathB is outside of pathA.
|
|
||||||
// Paths that are not absolute will be made absolute with Abs.
|
|
||||||
func IsAncestor(os, pathA, pathB, cwd string) (_ bool, err error) {
|
|
||||||
if pathA == pathB {
|
|
||||||
return false, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
pathA, err = Abs(os, pathA, cwd)
|
|
||||||
if err != nil {
|
|
||||||
return false, err
|
|
||||||
}
|
|
||||||
pathB, err = Abs(os, pathB, cwd)
|
|
||||||
if err != nil {
|
|
||||||
return false, err
|
|
||||||
}
|
|
||||||
sep := Separator(os)
|
|
||||||
if !strings.HasSuffix(pathA, string(sep)) {
|
|
||||||
pathA = fmt.Sprintf("%s%c", pathA, sep)
|
|
||||||
}
|
|
||||||
if pathA == pathB {
|
|
||||||
return false, nil
|
|
||||||
}
|
|
||||||
return strings.HasPrefix(pathB, pathA), nil
|
|
||||||
}
|
|
74
vendor/github.com/opencontainers/runtime-tools/filepath/clean.go
generated
vendored
@ -1,74 +0,0 @@
|
|||||||
package filepath
|
|
||||||
|
|
||||||
import (
|
|
||||||
"fmt"
|
|
||||||
"strings"
|
|
||||||
)
|
|
||||||
|
|
||||||
// Clean is an explicit-OS version of path/filepath's Clean.
|
|
||||||
func Clean(os, path string) string {
|
|
||||||
abs := IsAbs(os, path)
|
|
||||||
sep := Separator(os)
|
|
||||||
elements := strings.Split(path, string(sep))
|
|
||||||
|
|
||||||
// Replace multiple Separator elements with a single one.
|
|
||||||
for i := 0; i < len(elements); i++ {
|
|
||||||
if len(elements[i]) == 0 {
|
|
||||||
elements = append(elements[:i], elements[i+1:]...)
|
|
||||||
i--
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Eliminate each . path name element (the current directory).
|
|
||||||
for i := 0; i < len(elements); i++ {
|
|
||||||
if elements[i] == "." && len(elements) > 1 {
|
|
||||||
elements = append(elements[:i], elements[i+1:]...)
|
|
||||||
i--
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Eliminate each inner .. path name element (the parent directory)
|
|
||||||
// along with the non-.. element that precedes it.
|
|
||||||
for i := 1; i < len(elements); i++ {
|
|
||||||
if i == 1 && abs && sep == '\\' {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
if i > 0 && elements[i] == ".." {
|
|
||||||
elements = append(elements[:i-1], elements[i+1:]...)
|
|
||||||
i -= 2
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Eliminate .. elements that begin a rooted path:
|
|
||||||
// that is, replace "/.." by "/" at the beginning of a path,
|
|
||||||
// assuming Separator is '/'.
|
|
||||||
offset := 0
|
|
||||||
if sep == '\\' {
|
|
||||||
offset = 1
|
|
||||||
}
|
|
||||||
if abs {
|
|
||||||
for len(elements) > offset && elements[offset] == ".." {
|
|
||||||
elements = append(elements[:offset], elements[offset+1:]...)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
cleaned := strings.Join(elements, string(sep))
|
|
||||||
if abs {
|
|
||||||
if sep == '/' {
|
|
||||||
cleaned = fmt.Sprintf("%c%s", sep, cleaned)
|
|
||||||
} else if len(elements) == 1 {
|
|
||||||
cleaned = fmt.Sprintf("%s%c", cleaned, sep)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// If the result of this process is an empty string, Clean returns
|
|
||||||
// the string ".".
|
|
||||||
if len(cleaned) == 0 {
|
|
||||||
cleaned = "."
|
|
||||||
}
|
|
||||||
|
|
||||||
if cleaned == path {
|
|
||||||
return path
|
|
||||||
}
|
|
||||||
return Clean(os, cleaned)
|
|
||||||
}
|
|
6
vendor/github.com/opencontainers/runtime-tools/filepath/doc.go
generated
vendored
@ -1,6 +0,0 @@
// Package filepath implements Go's filepath package with explicit
// operating systems (and for some functions and explicit working
// directory). This allows tools built for one OS to operate on paths
// targeting another OS. For example, a Linux build can determine
// whether a path is absolute on Linux or on Windows.
package filepath
9
vendor/github.com/opencontainers/runtime-tools/filepath/join.go
generated
vendored
@ -1,9 +0,0 @@
package filepath

import "strings"

// Join is an explicit-OS version of path/filepath's Join.
func Join(os string, elem ...string) string {
	sep := Separator(os)
	return Clean(os, strings.Join(elem, string(sep)))
}
9
vendor/github.com/opencontainers/runtime-tools/filepath/separator.go
generated
vendored
@ -1,9 +0,0 @@
package filepath

// Separator is an explicit-OS version of path/filepath's Separator.
func Separator(os string) rune {
	if os == "windows" {
		return '\\'
	}
	return '/'
}
172
vendor/github.com/opencontainers/runtime-tools/generate/config.go
generated
vendored
@ -1,172 +0,0 @@
|
|||||||
package generate
|
|
||||||
|
|
||||||
import (
|
|
||||||
rspec "github.com/opencontainers/runtime-spec/specs-go"
|
|
||||||
)
|
|
||||||
|
|
||||||
func (g *Generator) initConfig() {
|
|
||||||
if g.Config == nil {
|
|
||||||
g.Config = &rspec.Spec{}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (g *Generator) initConfigProcess() {
|
|
||||||
g.initConfig()
|
|
||||||
if g.Config.Process == nil {
|
|
||||||
g.Config.Process = &rspec.Process{}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (g *Generator) initConfigProcessConsoleSize() {
|
|
||||||
g.initConfigProcess()
|
|
||||||
if g.Config.Process.ConsoleSize == nil {
|
|
||||||
g.Config.Process.ConsoleSize = &rspec.Box{}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (g *Generator) initConfigProcessCapabilities() {
|
|
||||||
g.initConfigProcess()
|
|
||||||
if g.Config.Process.Capabilities == nil {
|
|
||||||
g.Config.Process.Capabilities = &rspec.LinuxCapabilities{}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (g *Generator) initConfigRoot() {
|
|
||||||
g.initConfig()
|
|
||||||
if g.Config.Root == nil {
|
|
||||||
g.Config.Root = &rspec.Root{}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (g *Generator) initConfigAnnotations() {
|
|
||||||
g.initConfig()
|
|
||||||
if g.Config.Annotations == nil {
|
|
||||||
g.Config.Annotations = make(map[string]string)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (g *Generator) initConfigHooks() {
|
|
||||||
g.initConfig()
|
|
||||||
if g.Config.Hooks == nil {
|
|
||||||
g.Config.Hooks = &rspec.Hooks{}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (g *Generator) initConfigLinux() {
|
|
||||||
g.initConfig()
|
|
||||||
if g.Config.Linux == nil {
|
|
||||||
g.Config.Linux = &rspec.Linux{}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (g *Generator) initConfigLinuxIntelRdt() {
|
|
||||||
g.initConfigLinux()
|
|
||||||
if g.Config.Linux.IntelRdt == nil {
|
|
||||||
g.Config.Linux.IntelRdt = &rspec.LinuxIntelRdt{}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (g *Generator) initConfigLinuxSysctl() {
|
|
||||||
g.initConfigLinux()
|
|
||||||
if g.Config.Linux.Sysctl == nil {
|
|
||||||
g.Config.Linux.Sysctl = make(map[string]string)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (g *Generator) initConfigLinuxSeccomp() {
|
|
||||||
g.initConfigLinux()
|
|
||||||
if g.Config.Linux.Seccomp == nil {
|
|
||||||
g.Config.Linux.Seccomp = &rspec.LinuxSeccomp{}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (g *Generator) initConfigLinuxResources() {
|
|
||||||
g.initConfigLinux()
|
|
||||||
if g.Config.Linux.Resources == nil {
|
|
||||||
g.Config.Linux.Resources = &rspec.LinuxResources{}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (g *Generator) initConfigLinuxResourcesBlockIO() {
|
|
||||||
g.initConfigLinuxResources()
|
|
||||||
if g.Config.Linux.Resources.BlockIO == nil {
|
|
||||||
g.Config.Linux.Resources.BlockIO = &rspec.LinuxBlockIO{}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (g *Generator) initConfigLinuxResourcesCPU() {
|
|
||||||
g.initConfigLinuxResources()
|
|
||||||
if g.Config.Linux.Resources.CPU == nil {
|
|
||||||
g.Config.Linux.Resources.CPU = &rspec.LinuxCPU{}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (g *Generator) initConfigLinuxResourcesMemory() {
|
|
||||||
g.initConfigLinuxResources()
|
|
||||||
if g.Config.Linux.Resources.Memory == nil {
|
|
||||||
g.Config.Linux.Resources.Memory = &rspec.LinuxMemory{}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (g *Generator) initConfigLinuxResourcesNetwork() {
|
|
||||||
g.initConfigLinuxResources()
|
|
||||||
if g.Config.Linux.Resources.Network == nil {
|
|
||||||
g.Config.Linux.Resources.Network = &rspec.LinuxNetwork{}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (g *Generator) initConfigLinuxResourcesPids() {
|
|
||||||
g.initConfigLinuxResources()
|
|
||||||
if g.Config.Linux.Resources.Pids == nil {
|
|
||||||
g.Config.Linux.Resources.Pids = &rspec.LinuxPids{}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (g *Generator) initConfigSolaris() {
|
|
||||||
g.initConfig()
|
|
||||||
if g.Config.Solaris == nil {
|
|
||||||
g.Config.Solaris = &rspec.Solaris{}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (g *Generator) initConfigSolarisCappedCPU() {
|
|
||||||
g.initConfigSolaris()
|
|
||||||
if g.Config.Solaris.CappedCPU == nil {
|
|
||||||
g.Config.Solaris.CappedCPU = &rspec.SolarisCappedCPU{}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (g *Generator) initConfigSolarisCappedMemory() {
|
|
||||||
g.initConfigSolaris()
|
|
||||||
if g.Config.Solaris.CappedMemory == nil {
|
|
||||||
g.Config.Solaris.CappedMemory = &rspec.SolarisCappedMemory{}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (g *Generator) initConfigWindows() {
|
|
||||||
g.initConfig()
|
|
||||||
if g.Config.Windows == nil {
|
|
||||||
g.Config.Windows = &rspec.Windows{}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (g *Generator) initConfigWindowsHyperV() {
|
|
||||||
g.initConfigWindows()
|
|
||||||
if g.Config.Windows.HyperV == nil {
|
|
||||||
g.Config.Windows.HyperV = &rspec.WindowsHyperV{}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (g *Generator) initConfigWindowsResources() {
|
|
||||||
g.initConfigWindows()
|
|
||||||
if g.Config.Windows.Resources == nil {
|
|
||||||
g.Config.Windows.Resources = &rspec.WindowsResources{}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (g *Generator) initConfigWindowsResourcesMemory() {
|
|
||||||
g.initConfigWindowsResources()
|
|
||||||
if g.Config.Windows.Resources.Memory == nil {
|
|
||||||
g.Config.Windows.Resources.Memory = &rspec.WindowsMemoryResources{}
|
|
||||||
}
|
|
||||||
}
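All of the initConfig* helpers in the deleted generate/config.go follow one lazy-initialization pattern: each level of the Spec is allocated only if it is still nil, so a setter can safely touch a deeply nested field. A condensed sketch of that pattern using the runtime-spec types (hypothetical helper name, not the vendored code itself):

package main

import (
	"fmt"

	rspec "github.com/opencontainers/runtime-spec/specs-go"
)

// ensureLinuxResources mirrors the initConfigLinux/initConfigLinuxResources chain:
// allocate each nil intermediate struct before handing back the leaf.
func ensureLinuxResources(spec *rspec.Spec) *rspec.LinuxResources {
	if spec.Linux == nil {
		spec.Linux = &rspec.Linux{}
	}
	if spec.Linux.Resources == nil {
		spec.Linux.Resources = &rspec.LinuxResources{}
	}
	return spec.Linux.Resources
}

func main() {
	spec := &rspec.Spec{}
	limit := int64(1 << 30)
	ensureLinuxResources(spec).Memory = &rspec.LinuxMemory{Limit: &limit}
	fmt.Println(*spec.Linux.Resources.Memory.Limit) // 1073741824
}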
1562 vendor/github.com/opencontainers/runtime-tools/generate/generate.go generated vendored
File diff suppressed because it is too large
12 vendor/github.com/opencontainers/runtime-tools/generate/seccomp/consts.go generated vendored
@@ -1,12 +0,0 @@
package seccomp

const (
	seccompOverwrite = "overwrite"
	seccompAppend    = "append"
	nothing          = "nothing"
	kill             = "kill"
	trap             = "trap"
	trace            = "trace"
	allow            = "allow"
	errno            = "errno"
)
135 vendor/github.com/opencontainers/runtime-tools/generate/seccomp/parse_action.go generated vendored
@@ -1,135 +0,0 @@
|
|||||||
package seccomp
|
|
||||||
|
|
||||||
import (
|
|
||||||
"fmt"
|
|
||||||
"strconv"
|
|
||||||
"strings"
|
|
||||||
|
|
||||||
rspec "github.com/opencontainers/runtime-spec/specs-go"
|
|
||||||
)
|
|
||||||
|
|
||||||
// SyscallOpts contain options for parsing syscall rules
|
|
||||||
type SyscallOpts struct {
|
|
||||||
Action string
|
|
||||||
Syscall string
|
|
||||||
Index string
|
|
||||||
Value string
|
|
||||||
ValueTwo string
|
|
||||||
Operator string
|
|
||||||
}
|
|
||||||
|
|
||||||
// ParseSyscallFlag takes a SyscallOpts struct and the seccomp configuration
|
|
||||||
// and sets the new syscall rule accordingly
|
|
||||||
func ParseSyscallFlag(args SyscallOpts, config *rspec.LinuxSeccomp) error {
|
|
||||||
var arguments []string
|
|
||||||
if args.Index != "" && args.Value != "" && args.ValueTwo != "" && args.Operator != "" {
|
|
||||||
arguments = []string{args.Action, args.Syscall, args.Index, args.Value,
|
|
||||||
args.ValueTwo, args.Operator}
|
|
||||||
} else {
|
|
||||||
arguments = []string{args.Action, args.Syscall}
|
|
||||||
}
|
|
||||||
|
|
||||||
action, _ := parseAction(arguments[0])
|
|
||||||
if action == config.DefaultAction && args.argsAreEmpty() {
|
|
||||||
// default already set, no need to make changes
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
var newSyscall rspec.LinuxSyscall
|
|
||||||
numOfArgs := len(arguments)
|
|
||||||
if numOfArgs == 6 || numOfArgs == 2 {
|
|
||||||
argStruct, err := parseArguments(arguments[1:])
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
newSyscall = newSyscallStruct(arguments[1], action, argStruct)
|
|
||||||
} else {
|
|
||||||
return fmt.Errorf("incorrect number of arguments to ParseSyscall: %d", numOfArgs)
|
|
||||||
}
|
|
||||||
|
|
||||||
descison, err := decideCourseOfAction(&newSyscall, config.Syscalls)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
delimDescison := strings.Split(descison, ":")
|
|
||||||
|
|
||||||
if delimDescison[0] == seccompAppend {
|
|
||||||
config.Syscalls = append(config.Syscalls, newSyscall)
|
|
||||||
}
|
|
||||||
|
|
||||||
if delimDescison[0] == seccompOverwrite {
|
|
||||||
indexForOverwrite, err := strconv.ParseInt(delimDescison[1], 10, 32)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
config.Syscalls[indexForOverwrite] = newSyscall
|
|
||||||
}
|
|
||||||
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
var actions = map[string]rspec.LinuxSeccompAction{
|
|
||||||
"allow": rspec.ActAllow,
|
|
||||||
"errno": rspec.ActErrno,
|
|
||||||
"kill": rspec.ActKill,
|
|
||||||
"trace": rspec.ActTrace,
|
|
||||||
"trap": rspec.ActTrap,
|
|
||||||
}
|
|
||||||
|
|
||||||
// Take passed action, return the SCMP_ACT_<ACTION> version of it
|
|
||||||
func parseAction(action string) (rspec.LinuxSeccompAction, error) {
|
|
||||||
a, ok := actions[action]
|
|
||||||
if !ok {
|
|
||||||
return "", fmt.Errorf("unrecognized action: %s", action)
|
|
||||||
}
|
|
||||||
return a, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// ParseDefaultAction sets the default action of the seccomp configuration
|
|
||||||
// and then removes any rules that were already specified with this action
|
|
||||||
func ParseDefaultAction(action string, config *rspec.LinuxSeccomp) error {
|
|
||||||
if action == "" {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
defaultAction, err := parseAction(action)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
config.DefaultAction = defaultAction
|
|
||||||
err = RemoveAllMatchingRules(config, defaultAction)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// ParseDefaultActionForce simply sets the default action of the seccomp configuration
|
|
||||||
func ParseDefaultActionForce(action string, config *rspec.LinuxSeccomp) error {
|
|
||||||
if action == "" {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
defaultAction, err := parseAction(action)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
config.DefaultAction = defaultAction
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func newSyscallStruct(name string, action rspec.LinuxSeccompAction, args []rspec.LinuxSeccompArg) rspec.LinuxSyscall {
|
|
||||||
syscallStruct := rspec.LinuxSyscall{
|
|
||||||
Names: []string{name},
|
|
||||||
Action: action,
|
|
||||||
Args: args,
|
|
||||||
}
|
|
||||||
return syscallStruct
|
|
||||||
}
|
|
||||||
|
|
||||||
func (s SyscallOpts) argsAreEmpty() bool {
|
|
||||||
return (s.Index == "" &&
|
|
||||||
s.Value == "" &&
|
|
||||||
s.ValueTwo == "" &&
|
|
||||||
s.Operator == "")
|
|
||||||
}
55 vendor/github.com/opencontainers/runtime-tools/generate/seccomp/parse_architecture.go generated vendored
@@ -1,55 +0,0 @@
package seccomp

import (
	"fmt"

	rspec "github.com/opencontainers/runtime-spec/specs-go"
)

// ParseArchitectureFlag takes the raw string passed with the --arch flag, parses it
// and updates the Seccomp config accordingly
func ParseArchitectureFlag(architectureArg string, config *rspec.LinuxSeccomp) error {
	correctedArch, err := parseArch(architectureArg)
	if err != nil {
		return err
	}

	shouldAppend := true
	for _, alreadySpecified := range config.Architectures {
		if correctedArch == alreadySpecified {
			shouldAppend = false
		}
	}
	if shouldAppend {
		config.Architectures = append(config.Architectures, correctedArch)
	}
	return nil
}

func parseArch(arch string) (rspec.Arch, error) {
	arches := map[string]rspec.Arch{
		"x86":         rspec.ArchX86,
		"amd64":       rspec.ArchX86_64,
		"x32":         rspec.ArchX32,
		"arm":         rspec.ArchARM,
		"arm64":       rspec.ArchAARCH64,
		"mips":        rspec.ArchMIPS,
		"mips64":      rspec.ArchMIPS64,
		"mips64n32":   rspec.ArchMIPS64N32,
		"mipsel":      rspec.ArchMIPSEL,
		"mipsel64":    rspec.ArchMIPSEL64,
		"mipsel64n32": rspec.ArchMIPSEL64N32,
		"parisc":      rspec.ArchPARISC,
		"parisc64":    rspec.ArchPARISC64,
		"ppc":         rspec.ArchPPC,
		"ppc64":       rspec.ArchPPC64,
		"ppc64le":     rspec.ArchPPC64LE,
		"s390":        rspec.ArchS390,
		"s390x":       rspec.ArchS390X,
	}
	a, ok := arches[arch]
	if !ok {
		return "", fmt.Errorf("unrecognized architecture: %s", arch)
	}
	return a, nil
}
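ParseArchitectureFlag appends an architecture only if it is not already listed, so repeated flags are de-duplicated. A small sketch of that behaviour, assuming the vendored import path removed by this commit:

package main

import (
	"fmt"

	rspec "github.com/opencontainers/runtime-spec/specs-go"
	// Vendored package removed by this commit; import path taken from the diff.
	"github.com/opencontainers/runtime-tools/generate/seccomp"
)

func main() {
	config := &rspec.LinuxSeccomp{}
	for _, arch := range []string{"amd64", "amd64", "arm64"} {
		if err := seccomp.ParseArchitectureFlag(arch, config); err != nil {
			panic(err)
		}
	}
	// The duplicate "amd64" is skipped, leaving SCMP_ARCH_X86_64 and SCMP_ARCH_AARCH64.
	fmt.Println(config.Architectures)
}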
73 vendor/github.com/opencontainers/runtime-tools/generate/seccomp/parse_arguments.go generated vendored
@@ -1,73 +0,0 @@
package seccomp

import (
	"fmt"
	"strconv"

	rspec "github.com/opencontainers/runtime-spec/specs-go"
)

// parseArguments takes a list of arguments (delimArgs). It parses and fills out
// the argument information and returns a slice of arg structs
func parseArguments(delimArgs []string) ([]rspec.LinuxSeccompArg, error) {
	nilArgSlice := []rspec.LinuxSeccompArg{}
	numberOfArgs := len(delimArgs)

	// No parameters passed with syscall
	if numberOfArgs == 1 {
		return nilArgSlice, nil
	}

	// Correct number of parameters passed with syscall
	if numberOfArgs == 5 {
		syscallIndex, err := strconv.ParseUint(delimArgs[1], 10, 0)
		if err != nil {
			return nilArgSlice, err
		}

		syscallValue, err := strconv.ParseUint(delimArgs[2], 10, 64)
		if err != nil {
			return nilArgSlice, err
		}

		syscallValueTwo, err := strconv.ParseUint(delimArgs[3], 10, 64)
		if err != nil {
			return nilArgSlice, err
		}

		syscallOp, err := parseOperator(delimArgs[4])
		if err != nil {
			return nilArgSlice, err
		}

		argStruct := rspec.LinuxSeccompArg{
			Index:    uint(syscallIndex),
			Value:    syscallValue,
			ValueTwo: syscallValueTwo,
			Op:       syscallOp,
		}

		argSlice := []rspec.LinuxSeccompArg{}
		argSlice = append(argSlice, argStruct)
		return argSlice, nil
	}

	return nilArgSlice, fmt.Errorf("incorrect number of arguments passed with syscall: %d", numberOfArgs)
}

func parseOperator(operator string) (rspec.LinuxSeccompOperator, error) {
	operators := map[string]rspec.LinuxSeccompOperator{
		"NE": rspec.OpNotEqual,
		"LT": rspec.OpLessThan,
		"LE": rspec.OpLessEqual,
		"EQ": rspec.OpEqualTo,
		"GE": rspec.OpGreaterEqual,
		"GT": rspec.OpGreaterThan,
		"ME": rspec.OpMaskedEqual,
	}
	o, ok := operators[operator]
	if !ok {
		return "", fmt.Errorf("unrecognized operator: %s", operator)
	}
	return o, nil
}
52 vendor/github.com/opencontainers/runtime-tools/generate/seccomp/parse_remove.go generated vendored
@@ -1,52 +0,0 @@
package seccomp

import (
	"fmt"
	"reflect"
	"strings"

	rspec "github.com/opencontainers/runtime-spec/specs-go"
)

// RemoveAction takes the argument string that was passed with the --remove flag,
// parses it, and updates the Seccomp config accordingly
func RemoveAction(arguments string, config *rspec.LinuxSeccomp) error {
	if config == nil {
		return fmt.Errorf("Cannot remove action from nil Seccomp pointer")
	}

	syscallsToRemove := strings.Split(arguments, ",")

	for counter, syscallStruct := range config.Syscalls {
		if reflect.DeepEqual(syscallsToRemove, syscallStruct.Names) {
			config.Syscalls = append(config.Syscalls[:counter], config.Syscalls[counter+1:]...)
		}
	}

	return nil
}

// RemoveAllSeccompRules removes all seccomp syscall rules
func RemoveAllSeccompRules(config *rspec.LinuxSeccomp) error {
	if config == nil {
		return fmt.Errorf("Cannot remove action from nil Seccomp pointer")
	}
	newSyscallSlice := []rspec.LinuxSyscall{}
	config.Syscalls = newSyscallSlice
	return nil
}

// RemoveAllMatchingRules will remove any syscall rules that match the specified action
func RemoveAllMatchingRules(config *rspec.LinuxSeccomp, seccompAction rspec.LinuxSeccompAction) error {
	if config == nil {
		return fmt.Errorf("Cannot remove action from nil Seccomp pointer")
	}

	for _, syscall := range config.Syscalls {
		if reflect.DeepEqual(syscall.Action, seccompAction) {
			RemoveAction(strings.Join(syscall.Names, ","), config)
		}
	}

	return nil
}
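These removal helpers are what ParseDefaultAction in parse_action.go above relies on: once the default action changes, any rule that merely restates it is dropped. A sketch under the same assumed import path:

package main

import (
	"fmt"

	rspec "github.com/opencontainers/runtime-spec/specs-go"
	// Vendored package removed by this commit; import path taken from the diff.
	"github.com/opencontainers/runtime-tools/generate/seccomp"
)

func main() {
	config := &rspec.LinuxSeccomp{
		DefaultAction: rspec.ActErrno,
		Syscalls: []rspec.LinuxSyscall{
			{Names: []string{"mount"}, Action: rspec.ActAllow},
			{Names: []string{"reboot"}, Action: rspec.ActErrno},
		},
	}
	// Drop every rule whose action already equals the default.
	if err := seccomp.RemoveAllMatchingRules(config, rspec.ActErrno); err != nil {
		panic(err)
	}
	fmt.Println(len(config.Syscalls)) // 1: only the "mount" allow rule remains
}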
576 vendor/github.com/opencontainers/runtime-tools/generate/seccomp/seccomp_default.go generated vendored
@@ -1,576 +0,0 @@
|
|||||||
package seccomp
|
|
||||||
|
|
||||||
import (
|
|
||||||
"runtime"
|
|
||||||
|
|
||||||
"github.com/opencontainers/runtime-spec/specs-go"
|
|
||||||
rspec "github.com/opencontainers/runtime-spec/specs-go"
|
|
||||||
)
|
|
||||||
|
|
||||||
func arches() []rspec.Arch {
|
|
||||||
native := runtime.GOARCH
|
|
||||||
|
|
||||||
switch native {
|
|
||||||
case "amd64":
|
|
||||||
return []rspec.Arch{rspec.ArchX86_64, rspec.ArchX86, rspec.ArchX32}
|
|
||||||
case "arm64":
|
|
||||||
return []rspec.Arch{rspec.ArchARM, rspec.ArchAARCH64}
|
|
||||||
case "mips64":
|
|
||||||
return []rspec.Arch{rspec.ArchMIPS, rspec.ArchMIPS64, rspec.ArchMIPS64N32}
|
|
||||||
case "mips64n32":
|
|
||||||
return []rspec.Arch{rspec.ArchMIPS, rspec.ArchMIPS64, rspec.ArchMIPS64N32}
|
|
||||||
case "mipsel64":
|
|
||||||
return []rspec.Arch{rspec.ArchMIPSEL, rspec.ArchMIPSEL64, rspec.ArchMIPSEL64N32}
|
|
||||||
case "mipsel64n32":
|
|
||||||
return []rspec.Arch{rspec.ArchMIPSEL, rspec.ArchMIPSEL64, rspec.ArchMIPSEL64N32}
|
|
||||||
case "s390x":
|
|
||||||
return []rspec.Arch{rspec.ArchS390, rspec.ArchS390X}
|
|
||||||
default:
|
|
||||||
return []rspec.Arch{}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// DefaultProfile defines the whitelist for the default seccomp profile.
|
|
||||||
func DefaultProfile(rs *specs.Spec) *rspec.LinuxSeccomp {
|
|
||||||
|
|
||||||
syscalls := []rspec.LinuxSyscall{
|
|
||||||
{
|
|
||||||
Names: []string{
|
|
||||||
"accept",
|
|
||||||
"accept4",
|
|
||||||
"access",
|
|
||||||
"alarm",
|
|
||||||
"bind",
|
|
||||||
"brk",
|
|
||||||
"capget",
|
|
||||||
"capset",
|
|
||||||
"chdir",
|
|
||||||
"chmod",
|
|
||||||
"chown",
|
|
||||||
"chown32",
|
|
||||||
"clock_getres",
|
|
||||||
"clock_gettime",
|
|
||||||
"clock_nanosleep",
|
|
||||||
"close",
|
|
||||||
"connect",
|
|
||||||
"copy_file_range",
|
|
||||||
"creat",
|
|
||||||
"dup",
|
|
||||||
"dup2",
|
|
||||||
"dup3",
|
|
||||||
"epoll_create",
|
|
||||||
"epoll_create1",
|
|
||||||
"epoll_ctl",
|
|
||||||
"epoll_ctl_old",
|
|
||||||
"epoll_pwait",
|
|
||||||
"epoll_wait",
|
|
||||||
"epoll_wait_old",
|
|
||||||
"eventfd",
|
|
||||||
"eventfd2",
|
|
||||||
"execve",
|
|
||||||
"execveat",
|
|
||||||
"exit",
|
|
||||||
"exit_group",
|
|
||||||
"faccessat",
|
|
||||||
"fadvise64",
|
|
||||||
"fadvise64_64",
|
|
||||||
"fallocate",
|
|
||||||
"fanotify_mark",
|
|
||||||
"fchdir",
|
|
||||||
"fchmod",
|
|
||||||
"fchmodat",
|
|
||||||
"fchown",
|
|
||||||
"fchown32",
|
|
||||||
"fchownat",
|
|
||||||
"fcntl",
|
|
||||||
"fcntl64",
|
|
||||||
"fdatasync",
|
|
||||||
"fgetxattr",
|
|
||||||
"flistxattr",
|
|
||||||
"flock",
|
|
||||||
"fork",
|
|
||||||
"fremovexattr",
|
|
||||||
"fsetxattr",
|
|
||||||
"fstat",
|
|
||||||
"fstat64",
|
|
||||||
"fstatat64",
|
|
||||||
"fstatfs",
|
|
||||||
"fstatfs64",
|
|
||||||
"fsync",
|
|
||||||
"ftruncate",
|
|
||||||
"ftruncate64",
|
|
||||||
"futex",
|
|
||||||
"futimesat",
|
|
||||||
"getcpu",
|
|
||||||
"getcwd",
|
|
||||||
"getdents",
|
|
||||||
"getdents64",
|
|
||||||
"getegid",
|
|
||||||
"getegid32",
|
|
||||||
"geteuid",
|
|
||||||
"geteuid32",
|
|
||||||
"getgid",
|
|
||||||
"getgid32",
|
|
||||||
"getgroups",
|
|
||||||
"getgroups32",
|
|
||||||
"getitimer",
|
|
||||||
"getpeername",
|
|
||||||
"getpgid",
|
|
||||||
"getpgrp",
|
|
||||||
"getpid",
|
|
||||||
"getppid",
|
|
||||||
"getpriority",
|
|
||||||
"getrandom",
|
|
||||||
"getresgid",
|
|
||||||
"getresgid32",
|
|
||||||
"getresuid",
|
|
||||||
"getresuid32",
|
|
||||||
"getrlimit",
|
|
||||||
"get_robust_list",
|
|
||||||
"getrusage",
|
|
||||||
"getsid",
|
|
||||||
"getsockname",
|
|
||||||
"getsockopt",
|
|
||||||
"get_thread_area",
|
|
||||||
"gettid",
|
|
||||||
"gettimeofday",
|
|
||||||
"getuid",
|
|
||||||
"getuid32",
|
|
||||||
"getxattr",
|
|
||||||
"inotify_add_watch",
|
|
||||||
"inotify_init",
|
|
||||||
"inotify_init1",
|
|
||||||
"inotify_rm_watch",
|
|
||||||
"io_cancel",
|
|
||||||
"ioctl",
|
|
||||||
"io_destroy",
|
|
||||||
"io_getevents",
|
|
||||||
"ioprio_get",
|
|
||||||
"ioprio_set",
|
|
||||||
"io_setup",
|
|
||||||
"io_submit",
|
|
||||||
"ipc",
|
|
||||||
"kill",
|
|
||||||
"lchown",
|
|
||||||
"lchown32",
|
|
||||||
"lgetxattr",
|
|
||||||
"link",
|
|
||||||
"linkat",
|
|
||||||
"listen",
|
|
||||||
"listxattr",
|
|
||||||
"llistxattr",
|
|
||||||
"_llseek",
|
|
||||||
"lremovexattr",
|
|
||||||
"lseek",
|
|
||||||
"lsetxattr",
|
|
||||||
"lstat",
|
|
||||||
"lstat64",
|
|
||||||
"madvise",
|
|
||||||
"memfd_create",
|
|
||||||
"mincore",
|
|
||||||
"mkdir",
|
|
||||||
"mkdirat",
|
|
||||||
"mknod",
|
|
||||||
"mknodat",
|
|
||||||
"mlock",
|
|
||||||
"mlock2",
|
|
||||||
"mlockall",
|
|
||||||
"mmap",
|
|
||||||
"mmap2",
|
|
||||||
"mprotect",
|
|
||||||
"mq_getsetattr",
|
|
||||||
"mq_notify",
|
|
||||||
"mq_open",
|
|
||||||
"mq_timedreceive",
|
|
||||||
"mq_timedsend",
|
|
||||||
"mq_unlink",
|
|
||||||
"mremap",
|
|
||||||
"msgctl",
|
|
||||||
"msgget",
|
|
||||||
"msgrcv",
|
|
||||||
"msgsnd",
|
|
||||||
"msync",
|
|
||||||
"munlock",
|
|
||||||
"munlockall",
|
|
||||||
"munmap",
|
|
||||||
"nanosleep",
|
|
||||||
"newfstatat",
|
|
||||||
"_newselect",
|
|
||||||
"open",
|
|
||||||
"openat",
|
|
||||||
"pause",
|
|
||||||
"pipe",
|
|
||||||
"pipe2",
|
|
||||||
"poll",
|
|
||||||
"ppoll",
|
|
||||||
"prctl",
|
|
||||||
"pread64",
|
|
||||||
"preadv",
|
|
||||||
"prlimit64",
|
|
||||||
"pselect6",
|
|
||||||
"pwrite64",
|
|
||||||
"pwritev",
|
|
||||||
"read",
|
|
||||||
"readahead",
|
|
||||||
"readlink",
|
|
||||||
"readlinkat",
|
|
||||||
"readv",
|
|
||||||
"recv",
|
|
||||||
"recvfrom",
|
|
||||||
"recvmmsg",
|
|
||||||
"recvmsg",
|
|
||||||
"remap_file_pages",
|
|
||||||
"removexattr",
|
|
||||||
"rename",
|
|
||||||
"renameat",
|
|
||||||
"renameat2",
|
|
||||||
"restart_syscall",
|
|
||||||
"rmdir",
|
|
||||||
"rt_sigaction",
|
|
||||||
"rt_sigpending",
|
|
||||||
"rt_sigprocmask",
|
|
||||||
"rt_sigqueueinfo",
|
|
||||||
"rt_sigreturn",
|
|
||||||
"rt_sigsuspend",
|
|
||||||
"rt_sigtimedwait",
|
|
||||||
"rt_tgsigqueueinfo",
|
|
||||||
"sched_getaffinity",
|
|
||||||
"sched_getattr",
|
|
||||||
"sched_getparam",
|
|
||||||
"sched_get_priority_max",
|
|
||||||
"sched_get_priority_min",
|
|
||||||
"sched_getscheduler",
|
|
||||||
"sched_rr_get_interval",
|
|
||||||
"sched_setaffinity",
|
|
||||||
"sched_setattr",
|
|
||||||
"sched_setparam",
|
|
||||||
"sched_setscheduler",
|
|
||||||
"sched_yield",
|
|
||||||
"seccomp",
|
|
||||||
"select",
|
|
||||||
"semctl",
|
|
||||||
"semget",
|
|
||||||
"semop",
|
|
||||||
"semtimedop",
|
|
||||||
"send",
|
|
||||||
"sendfile",
|
|
||||||
"sendfile64",
|
|
||||||
"sendmmsg",
|
|
||||||
"sendmsg",
|
|
||||||
"sendto",
|
|
||||||
"setfsgid",
|
|
||||||
"setfsgid32",
|
|
||||||
"setfsuid",
|
|
||||||
"setfsuid32",
|
|
||||||
"setgid",
|
|
||||||
"setgid32",
|
|
||||||
"setgroups",
|
|
||||||
"setgroups32",
|
|
||||||
"setitimer",
|
|
||||||
"setpgid",
|
|
||||||
"setpriority",
|
|
||||||
"setregid",
|
|
||||||
"setregid32",
|
|
||||||
"setresgid",
|
|
||||||
"setresgid32",
|
|
||||||
"setresuid",
|
|
||||||
"setresuid32",
|
|
||||||
"setreuid",
|
|
||||||
"setreuid32",
|
|
||||||
"setrlimit",
|
|
||||||
"set_robust_list",
|
|
||||||
"setsid",
|
|
||||||
"setsockopt",
|
|
||||||
"set_thread_area",
|
|
||||||
"set_tid_address",
|
|
||||||
"setuid",
|
|
||||||
"setuid32",
|
|
||||||
"setxattr",
|
|
||||||
"shmat",
|
|
||||||
"shmctl",
|
|
||||||
"shmdt",
|
|
||||||
"shmget",
|
|
||||||
"shutdown",
|
|
||||||
"sigaltstack",
|
|
||||||
"signalfd",
|
|
||||||
"signalfd4",
|
|
||||||
"sigreturn",
|
|
||||||
"socket",
|
|
||||||
"socketcall",
|
|
||||||
"socketpair",
|
|
||||||
"splice",
|
|
||||||
"stat",
|
|
||||||
"stat64",
|
|
||||||
"statfs",
|
|
||||||
"statfs64",
|
|
||||||
"symlink",
|
|
||||||
"symlinkat",
|
|
||||||
"sync",
|
|
||||||
"sync_file_range",
|
|
||||||
"syncfs",
|
|
||||||
"sysinfo",
|
|
||||||
"syslog",
|
|
||||||
"tee",
|
|
||||||
"tgkill",
|
|
||||||
"time",
|
|
||||||
"timer_create",
|
|
||||||
"timer_delete",
|
|
||||||
"timerfd_create",
|
|
||||||
"timerfd_gettime",
|
|
||||||
"timerfd_settime",
|
|
||||||
"timer_getoverrun",
|
|
||||||
"timer_gettime",
|
|
||||||
"timer_settime",
|
|
||||||
"times",
|
|
||||||
"tkill",
|
|
||||||
"truncate",
|
|
||||||
"truncate64",
|
|
||||||
"ugetrlimit",
|
|
||||||
"umask",
|
|
||||||
"uname",
|
|
||||||
"unlink",
|
|
||||||
"unlinkat",
|
|
||||||
"utime",
|
|
||||||
"utimensat",
|
|
||||||
"utimes",
|
|
||||||
"vfork",
|
|
||||||
"vmsplice",
|
|
||||||
"wait4",
|
|
||||||
"waitid",
|
|
||||||
"waitpid",
|
|
||||||
"write",
|
|
||||||
"writev",
|
|
||||||
},
|
|
||||||
Action: rspec.ActAllow,
|
|
||||||
Args: []rspec.LinuxSeccompArg{},
|
|
||||||
},
|
|
||||||
{
|
|
||||||
Names: []string{"personality"},
|
|
||||||
Action: rspec.ActAllow,
|
|
||||||
Args: []rspec.LinuxSeccompArg{
|
|
||||||
{
|
|
||||||
Index: 0,
|
|
||||||
Value: 0x0,
|
|
||||||
Op: rspec.OpEqualTo,
|
|
||||||
},
|
|
||||||
{
|
|
||||||
Index: 0,
|
|
||||||
Value: 0x0008,
|
|
||||||
Op: rspec.OpEqualTo,
|
|
||||||
},
|
|
||||||
{
|
|
||||||
Index: 0,
|
|
||||||
Value: 0xffffffff,
|
|
||||||
Op: rspec.OpEqualTo,
|
|
||||||
},
|
|
||||||
},
|
|
||||||
},
|
|
||||||
}
|
|
||||||
var sysCloneFlagsIndex uint
|
|
||||||
|
|
||||||
capSysAdmin := false
|
|
||||||
caps := make(map[string]bool)
|
|
||||||
|
|
||||||
for _, cap := range rs.Process.Capabilities.Bounding {
|
|
||||||
caps[cap] = true
|
|
||||||
}
|
|
||||||
for _, cap := range rs.Process.Capabilities.Effective {
|
|
||||||
caps[cap] = true
|
|
||||||
}
|
|
||||||
for _, cap := range rs.Process.Capabilities.Inheritable {
|
|
||||||
caps[cap] = true
|
|
||||||
}
|
|
||||||
for _, cap := range rs.Process.Capabilities.Permitted {
|
|
||||||
caps[cap] = true
|
|
||||||
}
|
|
||||||
for _, cap := range rs.Process.Capabilities.Ambient {
|
|
||||||
caps[cap] = true
|
|
||||||
}
|
|
||||||
|
|
||||||
for cap := range caps {
|
|
||||||
switch cap {
|
|
||||||
case "CAP_DAC_READ_SEARCH":
|
|
||||||
syscalls = append(syscalls, []rspec.LinuxSyscall{
|
|
||||||
{
|
|
||||||
Names: []string{"open_by_handle_at"},
|
|
||||||
Action: rspec.ActAllow,
|
|
||||||
Args: []rspec.LinuxSeccompArg{},
|
|
||||||
},
|
|
||||||
}...)
|
|
||||||
case "CAP_SYS_ADMIN":
|
|
||||||
capSysAdmin = true
|
|
||||||
syscalls = append(syscalls, []rspec.LinuxSyscall{
|
|
||||||
{
|
|
||||||
Names: []string{
|
|
||||||
"bpf",
|
|
||||||
"clone",
|
|
||||||
"fanotify_init",
|
|
||||||
"lookup_dcookie",
|
|
||||||
"mount",
|
|
||||||
"name_to_handle_at",
|
|
||||||
"perf_event_open",
|
|
||||||
"setdomainname",
|
|
||||||
"sethostname",
|
|
||||||
"setns",
|
|
||||||
"umount",
|
|
||||||
"umount2",
|
|
||||||
"unshare",
|
|
||||||
},
|
|
||||||
Action: rspec.ActAllow,
|
|
||||||
Args: []rspec.LinuxSeccompArg{},
|
|
||||||
},
|
|
||||||
}...)
|
|
||||||
case "CAP_SYS_BOOT":
|
|
||||||
syscalls = append(syscalls, []rspec.LinuxSyscall{
|
|
||||||
{
|
|
||||||
Names: []string{"reboot"},
|
|
||||||
Action: rspec.ActAllow,
|
|
||||||
Args: []rspec.LinuxSeccompArg{},
|
|
||||||
},
|
|
||||||
}...)
|
|
||||||
case "CAP_SYS_CHROOT":
|
|
||||||
syscalls = append(syscalls, []rspec.LinuxSyscall{
|
|
||||||
{
|
|
||||||
Names: []string{"chroot"},
|
|
||||||
Action: rspec.ActAllow,
|
|
||||||
Args: []rspec.LinuxSeccompArg{},
|
|
||||||
},
|
|
||||||
}...)
|
|
||||||
case "CAP_SYS_MODULE":
|
|
||||||
syscalls = append(syscalls, []rspec.LinuxSyscall{
|
|
||||||
{
|
|
||||||
Names: []string{
|
|
||||||
"delete_module",
|
|
||||||
"init_module",
|
|
||||||
"finit_module",
|
|
||||||
"query_module",
|
|
||||||
},
|
|
||||||
Action: rspec.ActAllow,
|
|
||||||
Args: []rspec.LinuxSeccompArg{},
|
|
||||||
},
|
|
||||||
}...)
|
|
||||||
case "CAP_SYS_PACCT":
|
|
||||||
syscalls = append(syscalls, []rspec.LinuxSyscall{
|
|
||||||
{
|
|
||||||
Names: []string{"acct"},
|
|
||||||
Action: rspec.ActAllow,
|
|
||||||
Args: []rspec.LinuxSeccompArg{},
|
|
||||||
},
|
|
||||||
}...)
|
|
||||||
case "CAP_SYS_PTRACE":
|
|
||||||
syscalls = append(syscalls, []rspec.LinuxSyscall{
|
|
||||||
{
|
|
||||||
Names: []string{
|
|
||||||
"kcmp",
|
|
||||||
"process_vm_readv",
|
|
||||||
"process_vm_writev",
|
|
||||||
"ptrace",
|
|
||||||
},
|
|
||||||
Action: rspec.ActAllow,
|
|
||||||
Args: []rspec.LinuxSeccompArg{},
|
|
||||||
},
|
|
||||||
}...)
|
|
||||||
case "CAP_SYS_RAWIO":
|
|
||||||
syscalls = append(syscalls, []rspec.LinuxSyscall{
|
|
||||||
{
|
|
||||||
Names: []string{
|
|
||||||
"iopl",
|
|
||||||
"ioperm",
|
|
||||||
},
|
|
||||||
Action: rspec.ActAllow,
|
|
||||||
Args: []rspec.LinuxSeccompArg{},
|
|
||||||
},
|
|
||||||
}...)
|
|
||||||
case "CAP_SYS_TIME":
|
|
||||||
syscalls = append(syscalls, []rspec.LinuxSyscall{
|
|
||||||
{
|
|
||||||
Names: []string{
|
|
||||||
"settimeofday",
|
|
||||||
"stime",
|
|
||||||
"adjtimex",
|
|
||||||
},
|
|
||||||
Action: rspec.ActAllow,
|
|
||||||
Args: []rspec.LinuxSeccompArg{},
|
|
||||||
},
|
|
||||||
}...)
|
|
||||||
case "CAP_SYS_TTY_CONFIG":
|
|
||||||
syscalls = append(syscalls, []rspec.LinuxSyscall{
|
|
||||||
{
|
|
||||||
Names: []string{"vhangup"},
|
|
||||||
Action: rspec.ActAllow,
|
|
||||||
Args: []rspec.LinuxSeccompArg{},
|
|
||||||
},
|
|
||||||
}...)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
if !capSysAdmin {
|
|
||||||
syscalls = append(syscalls, []rspec.LinuxSyscall{
|
|
||||||
{
|
|
||||||
Names: []string{"clone"},
|
|
||||||
Action: rspec.ActAllow,
|
|
||||||
Args: []rspec.LinuxSeccompArg{
|
|
||||||
{
|
|
||||||
Index: sysCloneFlagsIndex,
|
|
||||||
Value: CloneNewNS | CloneNewUTS | CloneNewIPC | CloneNewUser | CloneNewPID | CloneNewNet,
|
|
||||||
ValueTwo: 0,
|
|
||||||
Op: rspec.OpMaskedEqual,
|
|
||||||
},
|
|
||||||
},
|
|
||||||
},
|
|
||||||
}...)
|
|
||||||
|
|
||||||
}
|
|
||||||
|
|
||||||
arch := runtime.GOARCH
|
|
||||||
switch arch {
|
|
||||||
case "arm", "arm64":
|
|
||||||
syscalls = append(syscalls, []rspec.LinuxSyscall{
|
|
||||||
{
|
|
||||||
Names: []string{
|
|
||||||
"breakpoint",
|
|
||||||
"cacheflush",
|
|
||||||
"set_tls",
|
|
||||||
},
|
|
||||||
Action: rspec.ActAllow,
|
|
||||||
Args: []rspec.LinuxSeccompArg{},
|
|
||||||
},
|
|
||||||
}...)
|
|
||||||
case "amd64", "x32":
|
|
||||||
syscalls = append(syscalls, []rspec.LinuxSyscall{
|
|
||||||
{
|
|
||||||
Names: []string{"arch_prctl"},
|
|
||||||
Action: rspec.ActAllow,
|
|
||||||
Args: []rspec.LinuxSeccompArg{},
|
|
||||||
},
|
|
||||||
}...)
|
|
||||||
fallthrough
|
|
||||||
case "x86":
|
|
||||||
syscalls = append(syscalls, []rspec.LinuxSyscall{
|
|
||||||
{
|
|
||||||
Names: []string{"modify_ldt"},
|
|
||||||
Action: rspec.ActAllow,
|
|
||||||
Args: []rspec.LinuxSeccompArg{},
|
|
||||||
},
|
|
||||||
}...)
|
|
||||||
case "s390", "s390x":
|
|
||||||
syscalls = append(syscalls, []rspec.LinuxSyscall{
|
|
||||||
{
|
|
||||||
Names: []string{
|
|
||||||
"s390_pci_mmio_read",
|
|
||||||
"s390_pci_mmio_write",
|
|
||||||
"s390_runtime_instr",
|
|
||||||
},
|
|
||||||
Action: rspec.ActAllow,
|
|
||||||
Args: []rspec.LinuxSeccompArg{},
|
|
||||||
},
|
|
||||||
}...)
|
|
||||||
/* Flags parameter of the clone syscall is the 2nd on s390 */
|
|
||||||
}
|
|
||||||
|
|
||||||
return &rspec.LinuxSeccomp{
|
|
||||||
DefaultAction: rspec.ActErrno,
|
|
||||||
Architectures: arches(),
|
|
||||||
Syscalls: syscalls,
|
|
||||||
}
|
|
||||||
}
15 vendor/github.com/opencontainers/runtime-tools/generate/seccomp/seccomp_default_linux.go generated vendored
@@ -1,15 +0,0 @@
// +build linux

package seccomp

import "syscall"

// System values passed through on linux
const (
	CloneNewIPC  = syscall.CLONE_NEWIPC
	CloneNewNet  = syscall.CLONE_NEWNET
	CloneNewNS   = syscall.CLONE_NEWNS
	CloneNewPID  = syscall.CLONE_NEWPID
	CloneNewUser = syscall.CLONE_NEWUSER
	CloneNewUTS  = syscall.CLONE_NEWUTS
)
@@ -1,15 +0,0 @@
// +build !linux

package seccomp

// These are copied from linux/amd64 syscall values, as a reference for other
// platforms to have access to
const (
	CloneNewIPC    = 0x8000000
	CloneNewNet    = 0x40000000
	CloneNewNS     = 0x20000
	CloneNewPID    = 0x20000000
	CloneNewUser   = 0x10000000
	CloneNewUTS    = 0x4000000
	CloneNewCgroup = 0x02000000
)
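Both Clone* constant blocks exist so that DefaultProfile (in seccomp_default.go above) can mask the flags argument of clone when CAP_SYS_ADMIN is absent; the rule ORs the six namespace flags together and requires the masked bits to be zero. A quick check of the combined mask value, not part of the diff:

package main

import "fmt"

// Values copied from the non-Linux constant block above.
const (
	cloneNewIPC  = 0x8000000
	cloneNewNet  = 0x40000000
	cloneNewNS   = 0x20000
	cloneNewPID  = 0x20000000
	cloneNewUser = 0x10000000
	cloneNewUTS  = 0x4000000
)

func main() {
	mask := cloneNewNS | cloneNewUTS | cloneNewIPC | cloneNewUser | cloneNewPID | cloneNewNet
	fmt.Printf("%#x\n", mask) // 0x7c020000
}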
140 vendor/github.com/opencontainers/runtime-tools/generate/seccomp/syscall_compare.go generated vendored
@@ -1,140 +0,0 @@
|
|||||||
package seccomp
|
|
||||||
|
|
||||||
import (
|
|
||||||
"fmt"
|
|
||||||
"reflect"
|
|
||||||
"strconv"
|
|
||||||
"strings"
|
|
||||||
|
|
||||||
rspec "github.com/opencontainers/runtime-spec/specs-go"
|
|
||||||
)
|
|
||||||
|
|
||||||
// Determine if a new syscall rule should be appended, overwrite an existing rule
|
|
||||||
// or if no action should be taken at all
|
|
||||||
func decideCourseOfAction(newSyscall *rspec.LinuxSyscall, syscalls []rspec.LinuxSyscall) (string, error) {
|
|
||||||
ruleForSyscallAlreadyExists := false
|
|
||||||
|
|
||||||
var sliceOfDeterminedActions []string
|
|
||||||
for i, syscall := range syscalls {
|
|
||||||
if sameName(&syscall, newSyscall) {
|
|
||||||
ruleForSyscallAlreadyExists = true
|
|
||||||
|
|
||||||
if identical(newSyscall, &syscall) {
|
|
||||||
sliceOfDeterminedActions = append(sliceOfDeterminedActions, nothing)
|
|
||||||
}
|
|
||||||
|
|
||||||
if sameAction(newSyscall, &syscall) {
|
|
||||||
if bothHaveArgs(newSyscall, &syscall) {
|
|
||||||
sliceOfDeterminedActions = append(sliceOfDeterminedActions, seccompAppend)
|
|
||||||
}
|
|
||||||
if onlyOneHasArgs(newSyscall, &syscall) {
|
|
||||||
if firstParamOnlyHasArgs(newSyscall, &syscall) {
|
|
||||||
sliceOfDeterminedActions = append(sliceOfDeterminedActions, "overwrite:"+strconv.Itoa(i))
|
|
||||||
} else {
|
|
||||||
sliceOfDeterminedActions = append(sliceOfDeterminedActions, nothing)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
if !sameAction(newSyscall, &syscall) {
|
|
||||||
if bothHaveArgs(newSyscall, &syscall) {
|
|
||||||
if sameArgs(newSyscall, &syscall) {
|
|
||||||
sliceOfDeterminedActions = append(sliceOfDeterminedActions, "overwrite:"+strconv.Itoa(i))
|
|
||||||
}
|
|
||||||
if !sameArgs(newSyscall, &syscall) {
|
|
||||||
sliceOfDeterminedActions = append(sliceOfDeterminedActions, seccompAppend)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
if onlyOneHasArgs(newSyscall, &syscall) {
|
|
||||||
sliceOfDeterminedActions = append(sliceOfDeterminedActions, seccompAppend)
|
|
||||||
}
|
|
||||||
if neitherHasArgs(newSyscall, &syscall) {
|
|
||||||
sliceOfDeterminedActions = append(sliceOfDeterminedActions, "overwrite:"+strconv.Itoa(i))
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
if !ruleForSyscallAlreadyExists {
|
|
||||||
sliceOfDeterminedActions = append(sliceOfDeterminedActions, seccompAppend)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Nothing has highest priority
|
|
||||||
for _, determinedAction := range sliceOfDeterminedActions {
|
|
||||||
if determinedAction == nothing {
|
|
||||||
return determinedAction, nil
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Overwrite has second highest priority
|
|
||||||
for _, determinedAction := range sliceOfDeterminedActions {
|
|
||||||
if strings.Contains(determinedAction, seccompOverwrite) {
|
|
||||||
return determinedAction, nil
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Append has the lowest priority
|
|
||||||
for _, determinedAction := range sliceOfDeterminedActions {
|
|
||||||
if determinedAction == seccompAppend {
|
|
||||||
return determinedAction, nil
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
return "", fmt.Errorf("Trouble determining action: %s", sliceOfDeterminedActions)
|
|
||||||
}
|
|
||||||
|
|
||||||
func hasArguments(config *rspec.LinuxSyscall) bool {
|
|
||||||
nilSyscall := new(rspec.LinuxSyscall)
|
|
||||||
return !sameArgs(nilSyscall, config)
|
|
||||||
}
|
|
||||||
|
|
||||||
func identical(config1, config2 *rspec.LinuxSyscall) bool {
|
|
||||||
return reflect.DeepEqual(config1, config2)
|
|
||||||
}
|
|
||||||
|
|
||||||
func identicalExceptAction(config1, config2 *rspec.LinuxSyscall) bool {
|
|
||||||
samename := sameName(config1, config2)
|
|
||||||
sameAction := sameAction(config1, config2)
|
|
||||||
sameArgs := sameArgs(config1, config2)
|
|
||||||
|
|
||||||
return samename && !sameAction && sameArgs
|
|
||||||
}
|
|
||||||
|
|
||||||
func identicalExceptArgs(config1, config2 *rspec.LinuxSyscall) bool {
|
|
||||||
samename := sameName(config1, config2)
|
|
||||||
sameAction := sameAction(config1, config2)
|
|
||||||
sameArgs := sameArgs(config1, config2)
|
|
||||||
|
|
||||||
return samename && sameAction && !sameArgs
|
|
||||||
}
|
|
||||||
|
|
||||||
func sameName(config1, config2 *rspec.LinuxSyscall) bool {
|
|
||||||
return reflect.DeepEqual(config1.Names, config2.Names)
|
|
||||||
}
|
|
||||||
|
|
||||||
func sameAction(config1, config2 *rspec.LinuxSyscall) bool {
|
|
||||||
return config1.Action == config2.Action
|
|
||||||
}
|
|
||||||
|
|
||||||
func sameArgs(config1, config2 *rspec.LinuxSyscall) bool {
|
|
||||||
return reflect.DeepEqual(config1.Args, config2.Args)
|
|
||||||
}
|
|
||||||
|
|
||||||
func bothHaveArgs(config1, config2 *rspec.LinuxSyscall) bool {
|
|
||||||
return hasArguments(config1) && hasArguments(config2)
|
|
||||||
}
|
|
||||||
|
|
||||||
func onlyOneHasArgs(config1, config2 *rspec.LinuxSyscall) bool {
|
|
||||||
conf1 := hasArguments(config1)
|
|
||||||
conf2 := hasArguments(config2)
|
|
||||||
|
|
||||||
return (conf1 && !conf2) || (!conf1 && conf2)
|
|
||||||
}
|
|
||||||
|
|
||||||
func neitherHasArgs(config1, config2 *rspec.LinuxSyscall) bool {
|
|
||||||
return !hasArguments(config1) && !hasArguments(config2)
|
|
||||||
}
|
|
||||||
|
|
||||||
func firstParamOnlyHasArgs(config1, config2 *rspec.LinuxSyscall) bool {
|
|
||||||
return !hasArguments(config1) && hasArguments(config2)
|
|
||||||
}
29 vendor/github.com/opencontainers/runtime-tools/specerror/bundle.go generated vendored
@@ -1,29 +0,0 @@
package specerror

import (
	"fmt"

	rfc2119 "github.com/opencontainers/runtime-tools/error"
)

// define error codes
const (
	// ConfigInRootBundleDir represents "This REQUIRED file MUST reside in the root of the bundle directory"
	ConfigInRootBundleDir Code = 0xa001 + iota
	// ConfigConstName represents "This REQUIRED file MUST be named `config.json`."
	ConfigConstName
	// ArtifactsInSingleDir represents "When supplied, while these artifacts MUST all be present in a single directory on the local filesystem, that directory itself is not part of the bundle."
	ArtifactsInSingleDir
)

var (
	containerFormatRef = func(version string) (reference string, err error) {
		return fmt.Sprintf(referenceTemplate, version, "bundle.md#container-format"), nil
	}
)

func init() {
	register(ConfigInRootBundleDir, rfc2119.Must, containerFormatRef)
	register(ConfigConstName, rfc2119.Must, containerFormatRef)
	register(ArtifactsInSingleDir, rfc2119.Must, containerFormatRef)
}
134 vendor/github.com/opencontainers/runtime-tools/specerror/config-linux.go generated vendored
@@ -1,134 +0,0 @@
|
|||||||
package specerror
|
|
||||||
|
|
||||||
import (
|
|
||||||
"fmt"
|
|
||||||
|
|
||||||
rfc2119 "github.com/opencontainers/runtime-tools/error"
|
|
||||||
)
|
|
||||||
|
|
||||||
// define error codes
|
|
||||||
const (
|
|
||||||
// DefaultFilesystems represents "The following filesystems SHOULD be made available in each container's filesystem:"
|
|
||||||
DefaultFilesystems Code = 0xc001 + iota
|
|
||||||
// NSPathAbs represents "This value MUST be an absolute path in the runtime mount namespace."
|
|
||||||
NSPathAbs
|
|
||||||
// NSProcInPath represents "The runtime MUST place the container process in the namespace associated with that `path`."
|
|
||||||
NSProcInPath
|
|
||||||
// NSPathMatchTypeError represents "The runtime MUST generate an error if `path` is not associated with a namespace of type `type`."
|
|
||||||
NSPathMatchTypeError
|
|
||||||
// NSNewNSWithoutPath represents "If `path` is not specified, the runtime MUST create a new container namespace of type `type`."
|
|
||||||
NSNewNSWithoutPath
|
|
||||||
// NSInheritWithoutType represents "If a namespace type is not specified in the `namespaces` array, the container MUST inherit the runtime namespace of that type."
|
|
||||||
NSInheritWithoutType
|
|
||||||
// NSErrorOnDup represents "If a `namespaces` field contains duplicated namespaces with same `type`, the runtime MUST generate an error."
|
|
||||||
NSErrorOnDup
|
|
||||||
// UserNSMapOwnershipRO represents "The runtime SHOULD NOT modify the ownership of referenced filesystems to realize the mapping."
|
|
||||||
UserNSMapOwnershipRO
|
|
||||||
// DevicesAvailable represents "devices (array of objects, OPTIONAL) lists devices that MUST be available in the container."
|
|
||||||
DevicesAvailable
|
|
||||||
// DevicesFileNotMatch represents "If a file already exists at `path` that does not match the requested device, the runtime MUST generate an error."
|
|
||||||
DevicesFileNotMatch
|
|
||||||
// DevicesMajMinRequired represents "`major, minor` (int64, REQUIRED unless `type` is `p`) - major, minor numbers for the device."
|
|
||||||
DevicesMajMinRequired
|
|
||||||
// DevicesErrorOnDup represents "The same `type`, `major` and `minor` SHOULD NOT be used for multiple devices."
|
|
||||||
DevicesErrorOnDup
|
|
||||||
// DefaultDevices represents "In addition to any devices configured with this setting, the runtime MUST also supply default devices."
|
|
||||||
DefaultDevices
|
|
||||||
// CgroupsPathAbsOrRel represents "The value of `cgroupsPath` MUST be either an absolute path or a relative path."
|
|
||||||
CgroupsPathAbsOrRel
|
|
||||||
// CgroupsAbsPathRelToMount represents "In the case of an absolute path (starting with `/`), the runtime MUST take the path to be relative to the cgroups mount point."
|
|
||||||
CgroupsAbsPathRelToMount
|
|
||||||
// CgroupsPathAttach represents "If the value is specified, the runtime MUST consistently attach to the same place in the cgroups hierarchy given the same value of `cgroupsPath`."
|
|
||||||
CgroupsPathAttach
|
|
||||||
// CgroupsPathError represents "Runtimes MAY consider certain `cgroupsPath` values to be invalid, and MUST generate an error if this is the case."
|
|
||||||
CgroupsPathError
|
|
||||||
// DevicesApplyInOrder represents "The runtime MUST apply entries in the listed order."
|
|
||||||
DevicesApplyInOrder
|
|
||||||
// BlkIOWeightOrLeafWeightExist represents "You MUST specify at least one of `weight` or `leafWeight` in a given entry, and MAY specify both."
|
|
||||||
BlkIOWeightOrLeafWeightExist
|
|
||||||
// IntelRdtPIDWrite represents "If `intelRdt` is set, the runtime MUST write the container process ID to the `<container-id>/tasks` file in a mounted `resctrl` pseudo-filesystem, using the container ID from `start` and creating the `container-id` directory if necessary."
|
|
||||||
IntelRdtPIDWrite
|
|
||||||
// IntelRdtNoMountedResctrlError represents "If no mounted `resctrl` pseudo-filesystem is available in the runtime mount namespace, the runtime MUST generate an error."
|
|
||||||
IntelRdtNoMountedResctrlError
|
|
||||||
// NotManipResctrlWithoutIntelRdt represents "If `intelRdt` is not set, the runtime MUST NOT manipulate any `resctrl` pseudo-filesystems."
|
|
||||||
NotManipResctrlWithoutIntelRdt
|
|
||||||
// IntelRdtL3CacheSchemaWrite represents "If `l3CacheSchema` is set, runtimes MUST write the value to the `schemata` file in the `<container-id>` directory discussed in `intelRdt`."
|
|
||||||
IntelRdtL3CacheSchemaWrite
|
|
||||||
// IntelRdtL3CacheSchemaNotWrite represents "If `l3CacheSchema` is not set, runtimes MUST NOT write to `schemata` files in any `resctrl` pseudo-filesystems."
|
|
||||||
IntelRdtL3CacheSchemaNotWrite
|
|
||||||
// SeccSyscallsNamesRequired represents "`names` MUST contain at least one entry."
|
|
||||||
SeccSyscallsNamesRequired
|
|
||||||
// MaskedPathsAbs represents "maskedPaths (array of strings, OPTIONAL) will mask over the provided paths inside the container so that they cannot be read. The values MUST be absolute paths in the container namespace."
|
|
||||||
MaskedPathsAbs
|
|
||||||
// ReadonlyPathsAbs represents "readonlyPaths (array of strings, OPTIONAL) will set the provided paths as readonly inside the container. The values MUST be absolute paths in the container namespace."
|
|
||||||
ReadonlyPathsAbs
|
|
||||||
)
|
|
||||||
|
|
||||||
var (
|
|
||||||
defaultFilesystemsRef = func(version string) (reference string, err error) {
|
|
||||||
return fmt.Sprintf(referenceTemplate, version, "config-linux.md#default-filesystems"), nil
|
|
||||||
}
|
|
||||||
namespacesRef = func(version string) (reference string, err error) {
|
|
||||||
return fmt.Sprintf(referenceTemplate, version, "config-linux.md#namespaces"), nil
|
|
||||||
}
|
|
||||||
userNamespaceMappingsRef = func(version string) (reference string, err error) {
|
|
||||||
return fmt.Sprintf(referenceTemplate, version, "config-linux.md#user-namespace-mappings"), nil
|
|
||||||
}
|
|
||||||
devicesRef = func(version string) (reference string, err error) {
|
|
||||||
return fmt.Sprintf(referenceTemplate, version, "config-linux.md#devices"), nil
|
|
||||||
}
|
|
||||||
defaultDevicesRef = func(version string) (reference string, err error) {
|
|
||||||
return fmt.Sprintf(referenceTemplate, version, "config-linux.md#default-devices"), nil
|
|
||||||
}
|
|
||||||
cgroupsPathRef = func(version string) (reference string, err error) {
|
|
||||||
return fmt.Sprintf(referenceTemplate, version, "config-linux.md#cgroups-path"), nil
|
|
||||||
}
|
|
||||||
deviceWhitelistRef = func(version string) (reference string, err error) {
|
|
||||||
return fmt.Sprintf(referenceTemplate, version, "config-linux.md#device-whitelist"), nil
|
|
||||||
}
|
|
||||||
blockIoRef = func(version string) (reference string, err error) {
|
|
||||||
return fmt.Sprintf(referenceTemplate, version, "config-linux.md#block-io"), nil
|
|
||||||
}
|
|
||||||
intelrdtRef = func(version string) (reference string, err error) {
|
|
||||||
return fmt.Sprintf(referenceTemplate, version, "config-linux.md#intelrdt"), nil
|
|
||||||
}
|
|
||||||
seccompRef = func(version string) (reference string, err error) {
|
|
||||||
return fmt.Sprintf(referenceTemplate, version, "config-linux.md#seccomp"), nil
|
|
||||||
}
|
|
||||||
maskedPathsRef = func(version string) (reference string, err error) {
|
|
||||||
return fmt.Sprintf(referenceTemplate, version, "config-linux.md#masked-paths"), nil
|
|
||||||
}
|
|
||||||
readonlyPathsRef = func(version string) (reference string, err error) {
|
|
||||||
return fmt.Sprintf(referenceTemplate, version, "config-linux.md#readonly-paths"), nil
|
|
||||||
}
|
|
||||||
)
|
|
||||||
|
|
||||||
func init() {
|
|
||||||
register(DefaultFilesystems, rfc2119.Should, defaultFilesystemsRef)
|
|
||||||
register(NSPathAbs, rfc2119.Must, namespacesRef)
|
|
||||||
register(NSProcInPath, rfc2119.Must, namespacesRef)
|
|
||||||
register(NSPathMatchTypeError, rfc2119.Must, namespacesRef)
|
|
||||||
register(NSNewNSWithoutPath, rfc2119.Must, namespacesRef)
|
|
||||||
register(NSInheritWithoutType, rfc2119.Must, namespacesRef)
|
|
||||||
register(NSErrorOnDup, rfc2119.Must, namespacesRef)
|
|
||||||
register(UserNSMapOwnershipRO, rfc2119.Should, userNamespaceMappingsRef)
|
|
||||||
register(DevicesAvailable, rfc2119.Must, devicesRef)
|
|
||||||
register(DevicesFileNotMatch, rfc2119.Must, devicesRef)
|
|
||||||
register(DevicesMajMinRequired, rfc2119.Required, devicesRef)
|
|
||||||
register(DevicesErrorOnDup, rfc2119.Should, devicesRef)
|
|
||||||
register(DefaultDevices, rfc2119.Must, defaultDevicesRef)
|
|
||||||
register(CgroupsPathAbsOrRel, rfc2119.Must, cgroupsPathRef)
|
|
||||||
register(CgroupsAbsPathRelToMount, rfc2119.Must, cgroupsPathRef)
|
|
||||||
register(CgroupsPathAttach, rfc2119.Must, cgroupsPathRef)
|
|
||||||
register(CgroupsPathError, rfc2119.Must, cgroupsPathRef)
|
|
||||||
register(DevicesApplyInOrder, rfc2119.Must, deviceWhitelistRef)
|
|
||||||
register(BlkIOWeightOrLeafWeightExist, rfc2119.Must, blockIoRef)
|
|
||||||
register(IntelRdtPIDWrite, rfc2119.Must, intelrdtRef)
|
|
||||||
register(IntelRdtNoMountedResctrlError, rfc2119.Must, intelrdtRef)
|
|
||||||
register(NotManipResctrlWithoutIntelRdt, rfc2119.Must, intelrdtRef)
|
|
||||||
register(IntelRdtL3CacheSchemaWrite, rfc2119.Must, intelrdtRef)
|
|
||||||
register(IntelRdtL3CacheSchemaNotWrite, rfc2119.Must, intelrdtRef)
|
|
||||||
register(SeccSyscallsNamesRequired, rfc2119.Must, seccompRef)
|
|
||||||
register(MaskedPathsAbs, rfc2119.Must, maskedPathsRef)
|
|
||||||
register(ReadonlyPathsAbs, rfc2119.Must, readonlyPathsRef)
|
|
||||||
}
32 vendor/github.com/opencontainers/runtime-tools/specerror/config-windows.go generated vendored
@@ -1,32 +0,0 @@
package specerror

import (
	"fmt"

	rfc2119 "github.com/opencontainers/runtime-tools/error"
)

// define error codes
const (
	// WindowsLayerFoldersRequired represents "`layerFolders` MUST contain at least one entry."
	WindowsLayerFoldersRequired Code = 0xd001 + iota
	// WindowsHyperVPresent represents "If present, the container MUST be run with Hyper-V isolation."
	WindowsHyperVPresent
	// WindowsHyperVOmit represents "If omitted, the container MUST be run as a Windows Server container."
	WindowsHyperVOmit
)

var (
	layerfoldersRef = func(version string) (reference string, err error) {
		return fmt.Sprintf(referenceTemplate, version, "config-windows.md#layerfolders"), nil
	}
	hypervRef = func(version string) (reference string, err error) {
		return fmt.Sprintf(referenceTemplate, version, "config-windows.md#hyperv"), nil
	}
)

func init() {
	register(WindowsLayerFoldersRequired, rfc2119.Must, layerfoldersRef)
	register(WindowsHyperVPresent, rfc2119.Must, hypervRef)
	register(WindowsHyperVOmit, rfc2119.Must, hypervRef)
}
188 vendor/github.com/opencontainers/runtime-tools/specerror/config.go generated vendored
@@ -1,188 +0,0 @@
|
|||||||
package specerror
|
|
||||||
|
|
||||||
import (
|
|
||||||
"fmt"
|
|
||||||
|
|
||||||
rfc2119 "github.com/opencontainers/runtime-tools/error"
|
|
||||||
)
|
|
||||||
|
|
||||||
// define error codes
|
|
||||||
const (
|
|
||||||
// SpecVersionInSemVer represents "`ociVersion` (string, REQUIRED) MUST be in SemVer v2.0.0 format and specifies the version of the Open Container Initiative Runtime Specification with which the bundle complies."
|
|
||||||
SpecVersionInSemVer Code = 0xb001 + iota
|
|
||||||
// RootOnWindowsRequired represents "On Windows, for Windows Server Containers, this field is REQUIRED."
|
|
||||||
RootOnWindowsRequired
|
|
||||||
// RootOnHyperVNotSet represents "For Hyper-V Containers, this field MUST NOT be set."
|
|
||||||
RootOnHyperVNotSet
|
|
||||||
// RootOnNonWindowsRequired represents "On all other platforms, this field is REQUIRED."
|
|
||||||
RootOnNonWindowsRequired
|
|
||||||
// RootPathOnWindowsGUID represents "On Windows, `path` MUST be a volume GUID path."
|
|
||||||
RootPathOnWindowsGUID
|
|
||||||
// RootPathOnPosixConvention represents "The value SHOULD be the conventional `rootfs`."
|
|
||||||
RootPathOnPosixConvention
|
|
||||||
// RootPathExist represents "A directory MUST exist at the path declared by the field."
|
|
||||||
RootPathExist
|
|
||||||
// RootReadonlyImplement represents "`readonly` (bool, OPTIONAL) If true then the root filesystem MUST be read-only inside the container, defaults to false."
|
|
||||||
RootReadonlyImplement
|
|
||||||
// RootReadonlyOnWindowsFalse represents "* On Windows, this field MUST be omitted or false."
|
|
||||||
RootReadonlyOnWindowsFalse
|
|
||||||
// MountsInOrder represents "The runtime MUST mount entries in the listed order."
|
|
||||||
MountsInOrder
|
|
||||||
// MountsDestAbs represents "Destination of mount point: path inside container. This value MUST be an absolute path."
|
|
||||||
MountsDestAbs
|
|
||||||
// MountsDestOnWindowsNotNested represents "Windows: one mount destination MUST NOT be nested within another mount (e.g., c:\\foo and c:\\foo\\bar)."
|
|
||||||
MountsDestOnWindowsNotNested
|
|
||||||
// MountsOptionsOnWindowsROSupport represents "Windows: runtimes MUST support `ro`, mounting the filesystem read-only when `ro` is given."
|
|
||||||
MountsOptionsOnWindowsROSupport
|
|
||||||
// ProcRequiredAtStart represents "This property is REQUIRED when `start` is called."
|
|
||||||
ProcRequiredAtStart
|
|
||||||
// ProcConsoleSizeIgnore represents "Runtimes MUST ignore `consoleSize` if `terminal` is `false` or unset."
|
|
||||||
ProcConsoleSizeIgnore
|
|
||||||
// ProcCwdAbs represents "cwd (string, REQUIRED) is the working directory that will be set for the executable. This value MUST be an absolute path."
|
|
||||||
ProcCwdAbs
|
|
||||||
// ProcArgsOneEntryRequired represents "This specification extends the IEEE standard in that at least one entry is REQUIRED, and that entry is used with the same semantics as `execvp`'s *file*."
|
|
||||||
ProcArgsOneEntryRequired
|
|
||||||
// PosixProcRlimitsTypeGenError represents "The runtime MUST generate an error for any values which cannot be mapped to a relevant kernel interface."
|
|
||||||
PosixProcRlimitsTypeGenError
|
|
||||||
// PosixProcRlimitsTypeGet represents "For each entry in `rlimits`, a `getrlimit(3)` on `type` MUST succeed."
|
|
||||||
PosixProcRlimitsTypeGet
|
|
||||||
// PosixProcRlimitsTypeValueError represents "valid values are defined in the ... man page"
|
|
||||||
PosixProcRlimitsTypeValueError
|
|
||||||
// PosixProcRlimitsSoftMatchCur represents "`rlim.rlim_cur` MUST match the configured value."
|
|
||||||
PosixProcRlimitsSoftMatchCur
|
|
||||||
// PosixProcRlimitsHardMatchMax represents "`rlim.rlim_max` MUST match the configured value."
|
|
||||||
PosixProcRlimitsHardMatchMax
|
|
||||||
// PosixProcRlimitsErrorOnDup represents "If `rlimits` contains duplicated entries with same `type`, the runtime MUST generate an error."
|
|
||||||
PosixProcRlimitsErrorOnDup
|
|
||||||
// LinuxProcCapError represents "Any value which cannot be mapped to a relevant kernel interface MUST cause an error."
|
|
||||||
LinuxProcCapError
|
|
||||||
// LinuxProcOomScoreAdjSet represents "If `oomScoreAdj` is set, the runtime MUST set `oom_score_adj` to the given value."
|
|
||||||
LinuxProcOomScoreAdjSet
|
|
||||||
// LinuxProcOomScoreAdjNotSet represents "If `oomScoreAdj` is not set, the runtime MUST NOT change the value of `oom_score_adj`."
|
|
||||||
LinuxProcOomScoreAdjNotSet
|
|
||||||
// PlatformSpecConfOnWindowsSet represents "This MUST be set if the target platform of this spec is `windows`."
|
|
||||||
PlatformSpecConfOnWindowsSet
|
|
||||||
// PosixHooksPathAbs represents "This specification extends the IEEE standard in that `path` MUST be absolute."
|
|
||||||
PosixHooksPathAbs
|
|
||||||
// PosixHooksTimeoutPositive represents "If set, `timeout` MUST be greater than zero."
|
|
||||||
PosixHooksTimeoutPositive
|
|
||||||
// PosixHooksCalledInOrder represents "Hooks MUST be called in the listed order."
|
|
||||||
PosixHooksCalledInOrder
|
|
||||||
// PosixHooksStateToStdin represents "The state of the container MUST be passed to hooks over stdin so that they may do work appropriate to the current state of the container."
|
|
||||||
PosixHooksStateToStdin
|
|
||||||
// PrestartTiming represents "The pre-start hooks MUST be called after the `start` operation is called but before the user-specified program command is executed."
|
|
||||||
PrestartTiming
|
|
||||||
// PoststartTiming represents "The post-start hooks MUST be called after the user-specified process is executed but before the `start` operation returns."
|
|
||||||
PoststartTiming
|
|
||||||
// PoststopTiming represents "The post-stop hooks MUST be called after the container is deleted but before the `delete` operation returns."
|
|
||||||
PoststopTiming
|
|
||||||
// AnnotationsKeyValueMap represents "Annotations MUST be a key-value map."
|
|
||||||
AnnotationsKeyValueMap
|
|
||||||
// AnnotationsKeyString represents "Keys MUST be strings."
|
|
||||||
AnnotationsKeyString
|
|
||||||
// AnnotationsKeyRequired represents "Keys MUST NOT be an empty string."
|
|
||||||
AnnotationsKeyRequired
|
|
||||||
// AnnotationsKeyReversedDomain represents "Keys SHOULD be named using a reverse domain notation - e.g. `com.example.myKey`."
|
|
||||||
AnnotationsKeyReversedDomain
|
|
||||||
// AnnotationsKeyReservedNS represents "Keys using the `org.opencontainers` namespace are reserved and MUST NOT be used by subsequent specifications."
|
|
||||||
AnnotationsKeyReservedNS
|
|
||||||
// AnnotationsKeyIgnoreUnknown represents "Implementations that are reading/processing this configuration file MUST NOT generate an error if they encounter an unknown annotation key."
|
|
||||||
AnnotationsKeyIgnoreUnknown
|
|
||||||
// AnnotationsValueString represents "Values MUST be strings."
|
|
||||||
AnnotationsValueString
|
|
||||||
// ExtensibilityIgnoreUnknownProp represents "Runtimes that are reading or processing this configuration file MUST NOT generate an error if they encounter an unknown property."
|
|
||||||
ExtensibilityIgnoreUnknownProp
|
|
||||||
// ValidValues represents "Runtimes that are reading or processing this configuration file MUST generate an error when invalid or unsupported values are encountered."
|
|
||||||
ValidValues
|
|
||||||
)
|
|
||||||
|
|
||||||
var (
|
|
||||||
specificationVersionRef = func(version string) (reference string, err error) {
|
|
||||||
return fmt.Sprintf(referenceTemplate, version, "config.md#specification-version"), nil
|
|
||||||
}
|
|
||||||
rootRef = func(version string) (reference string, err error) {
|
|
||||||
return fmt.Sprintf(referenceTemplate, version, "config.md#root"), nil
|
|
||||||
}
|
|
||||||
mountsRef = func(version string) (reference string, err error) {
|
|
||||||
return fmt.Sprintf(referenceTemplate, version, "config.md#mounts"), nil
|
|
||||||
}
|
|
||||||
processRef = func(version string) (reference string, err error) {
|
|
||||||
return fmt.Sprintf(referenceTemplate, version, "config.md#process"), nil
|
|
||||||
}
|
|
||||||
posixProcessRef = func(version string) (reference string, err error) {
|
|
||||||
return fmt.Sprintf(referenceTemplate, version, "config.md#posix-process"), nil
|
|
||||||
}
|
|
||||||
linuxProcessRef = func(version string) (reference string, err error) {
|
|
||||||
return fmt.Sprintf(referenceTemplate, version, "config.md#linux-process"), nil
|
|
||||||
}
|
|
||||||
platformSpecificConfigurationRef = func(version string) (reference string, err error) {
|
|
||||||
return fmt.Sprintf(referenceTemplate, version, "config.md#platform-specific-configuration"), nil
|
|
||||||
}
|
|
||||||
posixPlatformHooksRef = func(version string) (reference string, err error) {
|
|
||||||
return fmt.Sprintf(referenceTemplate, version, "config.md#posix-platform-hooks"), nil
|
|
||||||
}
|
|
||||||
prestartRef = func(version string) (reference string, err error) {
|
|
||||||
return fmt.Sprintf(referenceTemplate, version, "config.md#prestart"), nil
|
|
||||||
}
|
|
||||||
poststartRef = func(version string) (reference string, err error) {
|
|
||||||
return fmt.Sprintf(referenceTemplate, version, "config.md#poststart"), nil
|
|
||||||
}
|
|
||||||
poststopRef = func(version string) (reference string, err error) {
|
|
||||||
return fmt.Sprintf(referenceTemplate, version, "config.md#poststop"), nil
|
|
||||||
}
|
|
||||||
annotationsRef = func(version string) (reference string, err error) {
|
|
||||||
return fmt.Sprintf(referenceTemplate, version, "config.md#annotations"), nil
|
|
||||||
}
|
|
||||||
extensibilityRef = func(version string) (reference string, err error) {
|
|
||||||
return fmt.Sprintf(referenceTemplate, version, "config.md#extensibility"), nil
|
|
||||||
}
|
|
||||||
validValuesRef = func(version string) (reference string, err error) {
|
|
||||||
return fmt.Sprintf(referenceTemplate, version, "config.md#valid-values"), nil
|
|
||||||
}
|
|
||||||
)
|
|
||||||
|
|
||||||
func init() {
|
|
||||||
register(SpecVersionInSemVer, rfc2119.Must, specificationVersionRef)
|
|
||||||
register(RootOnWindowsRequired, rfc2119.Required, rootRef)
|
|
||||||
register(RootOnHyperVNotSet, rfc2119.Must, rootRef)
|
|
||||||
register(RootOnNonWindowsRequired, rfc2119.Required, rootRef)
|
|
||||||
register(RootPathOnWindowsGUID, rfc2119.Must, rootRef)
|
|
||||||
register(RootPathOnPosixConvention, rfc2119.Should, rootRef)
|
|
||||||
register(RootPathExist, rfc2119.Must, rootRef)
|
|
||||||
register(RootReadonlyImplement, rfc2119.Must, rootRef)
|
|
||||||
register(RootReadonlyOnWindowsFalse, rfc2119.Must, rootRef)
|
|
||||||
register(MountsInOrder, rfc2119.Must, mountsRef)
|
|
||||||
register(MountsDestAbs, rfc2119.Must, mountsRef)
|
|
||||||
register(MountsDestOnWindowsNotNested, rfc2119.Must, mountsRef)
|
|
||||||
register(MountsOptionsOnWindowsROSupport, rfc2119.Must, mountsRef)
|
|
||||||
register(ProcRequiredAtStart, rfc2119.Required, processRef)
|
|
||||||
register(ProcConsoleSizeIgnore, rfc2119.Must, processRef)
|
|
||||||
register(ProcCwdAbs, rfc2119.Must, processRef)
|
|
||||||
register(ProcArgsOneEntryRequired, rfc2119.Required, processRef)
|
|
||||||
register(PosixProcRlimitsTypeGenError, rfc2119.Must, posixProcessRef)
|
|
||||||
register(PosixProcRlimitsTypeGet, rfc2119.Must, posixProcessRef)
|
|
||||||
register(PosixProcRlimitsTypeValueError, rfc2119.Should, posixProcessRef)
|
|
||||||
register(PosixProcRlimitsSoftMatchCur, rfc2119.Must, posixProcessRef)
|
|
||||||
register(PosixProcRlimitsHardMatchMax, rfc2119.Must, posixProcessRef)
|
|
||||||
register(PosixProcRlimitsErrorOnDup, rfc2119.Must, posixProcessRef)
|
|
||||||
register(LinuxProcCapError, rfc2119.Must, linuxProcessRef)
|
|
||||||
register(LinuxProcOomScoreAdjSet, rfc2119.Must, linuxProcessRef)
|
|
||||||
register(LinuxProcOomScoreAdjNotSet, rfc2119.Must, linuxProcessRef)
|
|
||||||
register(PlatformSpecConfOnWindowsSet, rfc2119.Must, platformSpecificConfigurationRef)
|
|
||||||
register(PosixHooksPathAbs, rfc2119.Must, posixPlatformHooksRef)
|
|
||||||
register(PosixHooksTimeoutPositive, rfc2119.Must, posixPlatformHooksRef)
|
|
||||||
register(PosixHooksCalledInOrder, rfc2119.Must, posixPlatformHooksRef)
|
|
||||||
register(PosixHooksStateToStdin, rfc2119.Must, posixPlatformHooksRef)
|
|
||||||
register(PrestartTiming, rfc2119.Must, prestartRef)
|
|
||||||
register(PoststartTiming, rfc2119.Must, poststartRef)
|
|
||||||
register(PoststopTiming, rfc2119.Must, poststopRef)
|
|
||||||
register(AnnotationsKeyValueMap, rfc2119.Must, annotationsRef)
|
|
||||||
register(AnnotationsKeyString, rfc2119.Must, annotationsRef)
|
|
||||||
register(AnnotationsKeyRequired, rfc2119.Must, annotationsRef)
|
|
||||||
register(AnnotationsKeyReversedDomain, rfc2119.Should, annotationsRef)
|
|
||||||
register(AnnotationsKeyReservedNS, rfc2119.Must, annotationsRef)
|
|
||||||
register(AnnotationsKeyIgnoreUnknown, rfc2119.Must, annotationsRef)
|
|
||||||
register(AnnotationsValueString, rfc2119.Must, annotationsRef)
|
|
||||||
register(ExtensibilityIgnoreUnknownProp, rfc2119.Must, extensibilityRef)
|
|
||||||
register(ValidValues, rfc2119.Must, validValuesRef)
|
|
||||||
}
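For orientation, this is how a registered code ends up pointing at the spec: each reference resolver above expands referenceTemplate (defined in error.go, later in this diff) with a spec version and a section anchor. A minimal, package-internal sketch, assuming an example version of "1.0.1":

// Illustrative sketch, not part of the vendored file.
func exampleRootReference() {
	ref, err := rootRef("1.0.1")
	if err != nil {
		panic(err)
	}
	// Prints:
	// https://github.com/opencontainers/runtime-spec/blob/v1.0.1/config.md#root
	fmt.Println(ref)
}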
vendor/github.com/opencontainers/runtime-tools/specerror/error.go (generated, vendored, 152 deletions)
@@ -1,152 +0,0 @@
// Package specerror implements runtime-spec-specific tooling for
|
|
||||||
// tracking RFC 2119 violations.
|
|
||||||
package specerror
|
|
||||||
|
|
||||||
import (
|
|
||||||
"fmt"
|
|
||||||
|
|
||||||
"github.com/hashicorp/go-multierror"
|
|
||||||
rfc2119 "github.com/opencontainers/runtime-tools/error"
|
|
||||||
)
|
|
||||||
|
|
||||||
const referenceTemplate = "https://github.com/opencontainers/runtime-spec/blob/v%s/%s"
|
|
||||||
|
|
||||||
// Code represents the spec violation, enumerating both
|
|
||||||
// configuration violations and runtime violations.
|
|
||||||
type Code int64
|
|
||||||
|
|
||||||
const (
|
|
||||||
// NonError represents that an input is not an error
|
|
||||||
NonError Code = 0x1a001 + iota
|
|
||||||
// NonRFCError represents that an error is not an rfc2119 error
|
|
||||||
NonRFCError
|
|
||||||
)
|
|
||||||
|
|
||||||
type errorTemplate struct {
|
|
||||||
Level rfc2119.Level
|
|
||||||
Reference func(version string) (reference string, err error)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Error represents a runtime-spec violation.
|
|
||||||
type Error struct {
|
|
||||||
// Err holds the RFC 2119 violation.
|
|
||||||
Err rfc2119.Error
|
|
||||||
|
|
||||||
// Code holds the matchable violation code.
|
|
||||||
Code Code
|
|
||||||
}
|
|
||||||
|
|
||||||
// LevelErrors represents Errors filtered into fatal and warnings.
|
|
||||||
type LevelErrors struct {
|
|
||||||
// Warnings holds Errors that were below a compliance-level threshold.
|
|
||||||
Warnings []*Error
|
|
||||||
|
|
||||||
// Error holds errors that were at or above a compliance-level
|
|
||||||
// threshold, as well as errors that are not Errors.
|
|
||||||
Error *multierror.Error
|
|
||||||
}
|
|
||||||
|
|
||||||
var ociErrors = map[Code]errorTemplate{}
|
|
||||||
|
|
||||||
func register(code Code, level rfc2119.Level, ref func(version string) (string, error)) {
|
|
||||||
if _, ok := ociErrors[code]; ok {
|
|
||||||
panic(fmt.Sprintf("should not register the same code twice: %v", code))
|
|
||||||
}
|
|
||||||
|
|
||||||
ociErrors[code] = errorTemplate{Level: level, Reference: ref}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Error returns the error message with specification reference.
|
|
||||||
func (err *Error) Error() string {
|
|
||||||
return err.Err.Error()
|
|
||||||
}
|
|
||||||
|
|
||||||
// NewRFCError creates an rfc2119.Error referencing a spec violation.
|
|
||||||
//
|
|
||||||
// A version string (for the version of the spec that was violated)
|
|
||||||
// must be set to get a working URL.
|
|
||||||
func NewRFCError(code Code, err error, version string) (*rfc2119.Error, error) {
|
|
||||||
template := ociErrors[code]
|
|
||||||
reference, err2 := template.Reference(version)
|
|
||||||
if err2 != nil {
|
|
||||||
return nil, err2
|
|
||||||
}
|
|
||||||
return &rfc2119.Error{
|
|
||||||
Level: template.Level,
|
|
||||||
Reference: reference,
|
|
||||||
Err: err,
|
|
||||||
}, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// NewRFCErrorOrPanic creates an rfc2119.Error referencing a spec
|
|
||||||
// violation and panics on failure. This is handy for situations
|
|
||||||
// where you can't be bothered to check NewRFCError for failure.
|
|
||||||
func NewRFCErrorOrPanic(code Code, err error, version string) *rfc2119.Error {
|
|
||||||
rfcError, err2 := NewRFCError(code, err, version)
|
|
||||||
if err2 != nil {
|
|
||||||
panic(err2.Error())
|
|
||||||
}
|
|
||||||
return rfcError
|
|
||||||
}
|
|
||||||
|
|
||||||
// NewError creates an Error referencing a spec violation. The error
|
|
||||||
// can be cast to an *Error for extracting structured information
|
|
||||||
// about the level of the violation and a reference to the violated
|
|
||||||
// spec condition.
|
|
||||||
//
|
|
||||||
// A version string (for the version of the spec that was violated)
|
|
||||||
// must be set to get a working URL.
|
|
||||||
func NewError(code Code, err error, version string) error {
|
|
||||||
rfcError, err2 := NewRFCError(code, err, version)
|
|
||||||
if err2 != nil {
|
|
||||||
return err2
|
|
||||||
}
|
|
||||||
return &Error{
|
|
||||||
Err: *rfcError,
|
|
||||||
Code: code,
|
|
||||||
}
|
|
||||||
}
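A minimal usage sketch for the constructors above, assuming an example spec version of "1.0.1" and an arbitrary wrapped error; the fields read back out of *specerror.Error follow the struct definitions earlier in this file:

// Illustrative sketch, not part of the vendored file.
package main

import (
	"fmt"

	"github.com/opencontainers/runtime-tools/specerror"
)

func main() {
	err := specerror.NewError(specerror.RootOnWindowsRequired,
		fmt.Errorf("root is not set"), "1.0.1")
	if spErr, ok := err.(*specerror.Error); ok {
		fmt.Println(spErr.Code)          // matchable code
		fmt.Println(spErr.Err.Level)     // rfc2119.Required, as registered in config.go
		fmt.Println(spErr.Err.Reference) // URL into config.md#root for v1.0.1
	}
}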
|
|
||||||
|
|
||||||
// FindError finds an error from a source error (multiple error) and
|
|
||||||
// returns the error code if found.
|
|
||||||
// If the source error is nil or empty, return NonError.
|
|
||||||
// If the source error is not a multiple error, return NonRFCError.
|
|
||||||
func FindError(err error, code Code) Code {
|
|
||||||
if err == nil {
|
|
||||||
return NonError
|
|
||||||
}
|
|
||||||
|
|
||||||
if merr, ok := err.(*multierror.Error); ok {
|
|
||||||
if merr.ErrorOrNil() == nil {
|
|
||||||
return NonError
|
|
||||||
}
|
|
||||||
for _, e := range merr.Errors {
|
|
||||||
if rfcErr, ok := e.(*Error); ok {
|
|
||||||
if rfcErr.Code == code {
|
|
||||||
return code
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return NonRFCError
|
|
||||||
}
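A short sketch of FindError against a go-multierror aggregate, assuming an example version string of "1.0.1":

// Illustrative sketch, not part of the vendored file.
package main

import (
	"fmt"

	multierror "github.com/hashicorp/go-multierror"
	"github.com/opencontainers/runtime-tools/specerror"
)

func main() {
	var errs *multierror.Error
	errs = multierror.Append(errs, specerror.NewError(
		specerror.ProcCwdAbs, fmt.Errorf("cwd is relative"), "1.0.1"))

	// Found: the aggregate contains a *specerror.Error carrying this code.
	fmt.Println(specerror.FindError(errs, specerror.ProcCwdAbs) == specerror.ProcCwdAbs) // true
	// Not found: a different code yields NonRFCError.
	fmt.Println(specerror.FindError(errs, specerror.RootPathExist) == specerror.NonRFCError) // true
}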
|
|
||||||
|
|
||||||
// SplitLevel removes RFC 2119 errors with a level less than 'level'
|
|
||||||
// from the source error. If the source error is not a multierror, it
|
|
||||||
// is returned unchanged.
|
|
||||||
func SplitLevel(errIn error, level rfc2119.Level) (levelErrors LevelErrors, errOut error) {
|
|
||||||
merr, ok := errIn.(*multierror.Error)
|
|
||||||
if !ok {
|
|
||||||
return levelErrors, errIn
|
|
||||||
}
|
|
||||||
for _, err := range merr.Errors {
|
|
||||||
e, ok := err.(*Error)
|
|
||||||
if ok && e.Err.Level < level {
|
|
||||||
fmt.Println(e)
|
|
||||||
levelErrors.Warnings = append(levelErrors.Warnings, e)
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
levelErrors.Error = multierror.Append(levelErrors.Error, err)
|
|
||||||
}
|
|
||||||
return levelErrors, nil
|
|
||||||
}
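A sketch of splitting an aggregate into warnings and hard failures at the MUST threshold; the two codes used here are registered in config.go with SHOULD and MUST levels respectively, and "1.0.1" is only an example version:

// Illustrative sketch, not part of the vendored file.
package main

import (
	"fmt"

	multierror "github.com/hashicorp/go-multierror"
	rfc2119 "github.com/opencontainers/runtime-tools/error"
	"github.com/opencontainers/runtime-tools/specerror"
)

func main() {
	var errs *multierror.Error
	errs = multierror.Append(errs, specerror.NewError( // SHOULD-level finding
		specerror.RootPathOnPosixConvention, fmt.Errorf("path is not rootfs"), "1.0.1"))
	errs = multierror.Append(errs, specerror.NewError( // MUST-level finding
		specerror.ProcCwdAbs, fmt.Errorf("cwd is relative"), "1.0.1"))

	levelErrors, err := specerror.SplitLevel(errs, rfc2119.Must)
	if err != nil {
		panic(err)
	}
	fmt.Println(len(levelErrors.Warnings))      // 1: the SHOULD-level finding
	fmt.Println(levelErrors.Error.ErrorOrNil()) // the MUST-level finding remains an error
}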
vendor/github.com/opencontainers/runtime-tools/specerror/runtime-linux.go (generated, vendored, 23 deletions)
@@ -1,23 +0,0 @@
package specerror
|
|
||||||
|
|
||||||
import (
|
|
||||||
"fmt"
|
|
||||||
|
|
||||||
rfc2119 "github.com/opencontainers/runtime-tools/error"
|
|
||||||
)
|
|
||||||
|
|
||||||
// define error codes
|
|
||||||
const (
|
|
||||||
// DefaultRuntimeLinuxSymlinks represents "While creating the container (step 2 in the lifecycle), runtimes MUST create default symlinks if the source file exists after processing `mounts`."
|
|
||||||
DefaultRuntimeLinuxSymlinks Code = 0xf001 + iota
|
|
||||||
)
|
|
||||||
|
|
||||||
var (
|
|
||||||
devSymbolicLinksRef = func(version string) (reference string, err error) {
|
|
||||||
return fmt.Sprintf(referenceTemplate, version, "runtime-linux.md#dev-symbolic-links"), nil
|
|
||||||
}
|
|
||||||
)
|
|
||||||
|
|
||||||
func init() {
|
|
||||||
register(DefaultRuntimeLinuxSymlinks, rfc2119.Must, devSymbolicLinksRef)
|
|
||||||
}
vendor/github.com/opencontainers/runtime-tools/specerror/runtime.go (generated, vendored, 179 deletions)
@@ -1,179 +0,0 @@
package specerror
|
|
||||||
|
|
||||||
import (
|
|
||||||
"fmt"
|
|
||||||
|
|
||||||
rfc2119 "github.com/opencontainers/runtime-tools/error"
|
|
||||||
)
|
|
||||||
|
|
||||||
// define error codes
|
|
||||||
const (
|
|
||||||
// EntityOperSameContainer represents "The entity using a runtime to create a container MUST be able to use the operations defined in this specification against that same container."
|
|
||||||
EntityOperSameContainer Code = 0xe001 + iota
|
|
||||||
// StateIDUniq represents "`id` (string, REQUIRED) is the container's ID. This MUST be unique across all containers on this host."
|
|
||||||
StateIDUniq
|
|
||||||
// StateNewStatus represents "Additional values MAY be defined by the runtime, however, they MUST be used to represent new runtime states not defined above."
|
|
||||||
StateNewStatus
|
|
||||||
// DefaultStateJSONPattern represents "When serialized in JSON, the format MUST adhere to the default pattern."
|
|
||||||
DefaultStateJSONPattern
|
|
||||||
// EnvCreateImplement represents "The container's runtime environment MUST be created according to the configuration in `config.json`."
|
|
||||||
EnvCreateImplement
|
|
||||||
// EnvCreateError represents "If the runtime is unable to create the environment specified in the `config.json`, it MUST generate an error."
|
|
||||||
EnvCreateError
|
|
||||||
// ProcNotRunAtResRequest represents "While the resources requested in the `config.json` MUST be created, the user-specified program (from `process`) MUST NOT be run at this time."
|
|
||||||
ProcNotRunAtResRequest
|
|
||||||
// ConfigUpdatesWithoutAffect represents "Any updates to `config.json` after this step MUST NOT affect the container."
|
|
||||||
ConfigUpdatesWithoutAffect
|
|
||||||
// PrestartHooksInvoke represents "The prestart hooks MUST be invoked by the runtime."
|
|
||||||
PrestartHooksInvoke
|
|
||||||
// PrestartHookFailGenError represents "If any prestart hook fails, the runtime MUST generate an error, stop the container, and continue the lifecycle at step 9."
|
|
||||||
PrestartHookFailGenError
|
|
||||||
// ProcImplement represents "The runtime MUST run the user-specified program, as specified by `process`."
|
|
||||||
ProcImplement
|
|
||||||
// PoststartHooksInvoke represents "The poststart hooks MUST be invoked by the runtime."
|
|
||||||
PoststartHooksInvoke
|
|
||||||
// PoststartHookFailGenWarn represents "If any poststart hook fails, the runtime MUST log a warning, but the remaining hooks and lifecycle continue as if the hook had succeeded."
|
|
||||||
PoststartHookFailGenWarn
|
|
||||||
// UndoCreateSteps represents "The container MUST be destroyed by undoing the steps performed during create phase (step 2)."
|
|
||||||
UndoCreateSteps
|
|
||||||
// PoststopHooksInvoke represents "The poststop hooks MUST be invoked by the runtime."
|
|
||||||
PoststopHooksInvoke
|
|
||||||
// PoststopHookFailGenWarn represents "If any poststop hook fails, the runtime MUST log a warning, but the remaining hooks and lifecycle continue as if the hook had succeeded."
|
|
||||||
PoststopHookFailGenWarn
|
|
||||||
// ErrorsLeaveStateUnchange represents "Unless otherwise stated, generating an error MUST leave the state of the environment as if the operation were never attempted - modulo any possible trivial ancillary changes such as logging."
|
|
||||||
ErrorsLeaveStateUnchange
|
|
||||||
// WarnsLeaveFlowUnchange represents "Unless otherwise stated, logging a warning does not change the flow of the operation; it MUST continue as if the warning had not been logged."
|
|
||||||
WarnsLeaveFlowUnchange
|
|
||||||
// DefaultOperations represents "Unless otherwise stated, runtimes MUST support the default operations."
|
|
||||||
DefaultOperations
|
|
||||||
// QueryWithoutIDGenError represents "This operation MUST generate an error if it is not provided the ID of a container."
|
|
||||||
QueryWithoutIDGenError
|
|
||||||
// QueryNonExistGenError represents "Attempting to query a container that does not exist MUST generate an error."
|
|
||||||
QueryNonExistGenError
|
|
||||||
// QueryStateImplement represents "This operation MUST return the state of a container as specified in the State section."
|
|
||||||
QueryStateImplement
|
|
||||||
// CreateWithBundlePathAndID represents "This operation MUST generate an error if it is not provided a path to the bundle and the container ID to associate with the container."
|
|
||||||
CreateWithBundlePathAndID
|
|
||||||
// CreateWithUniqueID represents "If the ID provided is not unique across all containers within the scope of the runtime, or is not valid in any other way, the implementation MUST generate an error and a new container MUST NOT be created."
|
|
||||||
CreateWithUniqueID
|
|
||||||
// CreateNewContainer represents "This operation MUST create a new container."
|
|
||||||
CreateNewContainer
|
|
||||||
// PropsApplyExceptProcOnCreate represents "All of the properties configured in `config.json` except for `process` MUST be applied."
|
|
||||||
PropsApplyExceptProcOnCreate
|
|
||||||
// ProcArgsApplyUntilStart represents "`process.args` MUST NOT be applied until triggered by the `start` operation."
|
|
||||||
ProcArgsApplyUntilStart
|
|
||||||
// PropApplyFailGenError represents "If the runtime cannot apply a property as specified in the configuration, it MUST generate an error."
|
|
||||||
PropApplyFailGenError
|
|
||||||
// PropApplyFailNotCreate represents "If the runtime cannot apply a property as specified in the configuration, a new container MUST NOT be created."
|
|
||||||
PropApplyFailNotCreate
|
|
||||||
// StartWithoutIDGenError represents "`start` operation MUST generate an error if it is not provided the container ID."
|
|
||||||
StartWithoutIDGenError
|
|
||||||
// StartNotCreatedHaveNoEffect represents "Attempting to `start` a container that is not `created` MUST have no effect on the container."
|
|
||||||
StartNotCreatedHaveNoEffect
|
|
||||||
// StartNotCreatedGenError represents "Attempting to `start` a container that is not `created` MUST generate an error."
|
|
||||||
StartNotCreatedGenError
|
|
||||||
// StartProcImplement represents "`start` operation MUST run the user-specified program as specified by `process`."
|
|
||||||
StartProcImplement
|
|
||||||
// StartWithProcUnsetGenError represents "`start` operation MUST generate an error if `process` was not set."
|
|
||||||
StartWithProcUnsetGenError
|
|
||||||
// KillWithoutIDGenError represents "`kill` operation MUST generate an error if it is not provided the container ID."
|
|
||||||
KillWithoutIDGenError
|
|
||||||
// KillNonCreateRunHaveNoEffect represents "Attempting to send a signal to a container that is neither `created` nor `running` MUST have no effect on the container."
|
|
||||||
KillNonCreateRunHaveNoEffect
|
|
||||||
// KillNonCreateRunGenError represents "Attempting to send a signal to a container that is neither `created` nor `running` MUST generate an error."
|
|
||||||
KillNonCreateRunGenError
|
|
||||||
// KillSignalImplement represents "`kill` operation MUST send the specified signal to the container process."
|
|
||||||
KillSignalImplement
|
|
||||||
// DeleteWithoutIDGenError represents "`delete` operation MUST generate an error if it is not provided the container ID."
|
|
||||||
DeleteWithoutIDGenError
|
|
||||||
// DeleteNonStopHaveNoEffect represents "Attempting to `delete` a container that is not `stopped` MUST have no effect on the container."
|
|
||||||
DeleteNonStopHaveNoEffect
|
|
||||||
// DeleteNonStopGenError represents "Attempting to `delete` a container that is not `stopped` MUST generate an error."
|
|
||||||
DeleteNonStopGenError
|
|
||||||
// DeleteResImplement represents "Deleting a container MUST delete the resources that were created during the `create` step."
|
|
||||||
DeleteResImplement
|
|
||||||
// DeleteOnlyCreatedRes represents "Note that resources associated with the container, but not created by this container, MUST NOT be deleted."
|
|
||||||
DeleteOnlyCreatedRes
|
|
||||||
)
|
|
||||||
|
|
||||||
var (
|
|
||||||
scopeOfAContainerRef = func(version string) (reference string, err error) {
|
|
||||||
return fmt.Sprintf(referenceTemplate, version, "runtime.md#scope-of-a-container"), nil
|
|
||||||
}
|
|
||||||
stateRef = func(version string) (reference string, err error) {
|
|
||||||
return fmt.Sprintf(referenceTemplate, version, "runtime.md#state"), nil
|
|
||||||
}
|
|
||||||
lifecycleRef = func(version string) (reference string, err error) {
|
|
||||||
return fmt.Sprintf(referenceTemplate, version, "runtime.md#lifecycle"), nil
|
|
||||||
}
|
|
||||||
errorsRef = func(version string) (reference string, err error) {
|
|
||||||
return fmt.Sprintf(referenceTemplate, version, "runtime.md#errors"), nil
|
|
||||||
}
|
|
||||||
warningsRef = func(version string) (reference string, err error) {
|
|
||||||
return fmt.Sprintf(referenceTemplate, version, "runtime.md#warnings"), nil
|
|
||||||
}
|
|
||||||
operationsRef = func(version string) (reference string, err error) {
|
|
||||||
return fmt.Sprintf(referenceTemplate, version, "runtime.md#operations"), nil
|
|
||||||
}
|
|
||||||
queryStateRef = func(version string) (reference string, err error) {
|
|
||||||
return fmt.Sprintf(referenceTemplate, version, "runtime.md#query-state"), nil
|
|
||||||
}
|
|
||||||
createRef = func(version string) (reference string, err error) {
|
|
||||||
return fmt.Sprintf(referenceTemplate, version, "runtime.md#create"), nil
|
|
||||||
}
|
|
||||||
startRef = func(version string) (reference string, err error) {
|
|
||||||
return fmt.Sprintf(referenceTemplate, version, "runtime.md#start"), nil
|
|
||||||
}
|
|
||||||
killRef = func(version string) (reference string, err error) {
|
|
||||||
return fmt.Sprintf(referenceTemplate, version, "runtime.md#kill"), nil
|
|
||||||
}
|
|
||||||
deleteRef = func(version string) (reference string, err error) {
|
|
||||||
return fmt.Sprintf(referenceTemplate, version, "runtime.md#delete"), nil
|
|
||||||
}
|
|
||||||
)
|
|
||||||
|
|
||||||
func init() {
|
|
||||||
register(EntityOperSameContainer, rfc2119.Must, scopeOfAContainerRef)
|
|
||||||
register(StateIDUniq, rfc2119.Must, stateRef)
|
|
||||||
register(StateNewStatus, rfc2119.Must, stateRef)
|
|
||||||
register(DefaultStateJSONPattern, rfc2119.Must, stateRef)
|
|
||||||
register(EnvCreateImplement, rfc2119.Must, lifecycleRef)
|
|
||||||
register(EnvCreateError, rfc2119.Must, lifecycleRef)
|
|
||||||
register(ProcNotRunAtResRequest, rfc2119.Must, lifecycleRef)
|
|
||||||
register(ConfigUpdatesWithoutAffect, rfc2119.Must, lifecycleRef)
|
|
||||||
register(PrestartHooksInvoke, rfc2119.Must, lifecycleRef)
|
|
||||||
register(PrestartHookFailGenError, rfc2119.Must, lifecycleRef)
|
|
||||||
register(ProcImplement, rfc2119.Must, lifecycleRef)
|
|
||||||
register(PoststartHooksInvoke, rfc2119.Must, lifecycleRef)
|
|
||||||
register(PoststartHookFailGenWarn, rfc2119.Must, lifecycleRef)
|
|
||||||
register(UndoCreateSteps, rfc2119.Must, lifecycleRef)
|
|
||||||
register(PoststopHooksInvoke, rfc2119.Must, lifecycleRef)
|
|
||||||
register(PoststopHookFailGenWarn, rfc2119.Must, lifecycleRef)
|
|
||||||
register(ErrorsLeaveStateUnchange, rfc2119.Must, errorsRef)
|
|
||||||
register(WarnsLeaveFlowUnchange, rfc2119.Must, warningsRef)
|
|
||||||
register(DefaultOperations, rfc2119.Must, operationsRef)
|
|
||||||
register(QueryWithoutIDGenError, rfc2119.Must, queryStateRef)
|
|
||||||
register(QueryNonExistGenError, rfc2119.Must, queryStateRef)
|
|
||||||
register(QueryStateImplement, rfc2119.Must, queryStateRef)
|
|
||||||
register(CreateWithBundlePathAndID, rfc2119.Must, createRef)
|
|
||||||
register(CreateWithUniqueID, rfc2119.Must, createRef)
|
|
||||||
register(CreateNewContainer, rfc2119.Must, createRef)
|
|
||||||
register(PropsApplyExceptProcOnCreate, rfc2119.Must, createRef)
|
|
||||||
register(ProcArgsApplyUntilStart, rfc2119.Must, createRef)
|
|
||||||
register(PropApplyFailGenError, rfc2119.Must, createRef)
|
|
||||||
register(PropApplyFailNotCreate, rfc2119.Must, createRef)
|
|
||||||
register(StartWithoutIDGenError, rfc2119.Must, startRef)
|
|
||||||
register(StartNotCreatedHaveNoEffect, rfc2119.Must, startRef)
|
|
||||||
register(StartNotCreatedGenError, rfc2119.Must, startRef)
|
|
||||||
register(StartProcImplement, rfc2119.Must, startRef)
|
|
||||||
register(StartWithProcUnsetGenError, rfc2119.Must, startRef)
|
|
||||||
register(KillWithoutIDGenError, rfc2119.Must, killRef)
|
|
||||||
register(KillNonCreateRunHaveNoEffect, rfc2119.Must, killRef)
|
|
||||||
register(KillNonCreateRunGenError, rfc2119.Must, killRef)
|
|
||||||
register(KillSignalImplement, rfc2119.Must, killRef)
|
|
||||||
register(DeleteWithoutIDGenError, rfc2119.Must, deleteRef)
|
|
||||||
register(DeleteNonStopHaveNoEffect, rfc2119.Must, deleteRef)
|
|
||||||
register(DeleteNonStopGenError, rfc2119.Must, deleteRef)
|
|
||||||
register(DeleteResImplement, rfc2119.Must, deleteRef)
|
|
||||||
register(DeleteOnlyCreatedRes, rfc2119.Must, deleteRef)
|
|
||||||
}
vendor/github.com/opencontainers/runtime-tools/validate/validate.go (generated, vendored, 838 deletions)
@@ -1,838 +0,0 @@
package validate
|
|
||||||
|
|
||||||
import (
|
|
||||||
"bufio"
|
|
||||||
"encoding/json"
|
|
||||||
"errors"
|
|
||||||
"fmt"
|
|
||||||
"io/ioutil"
|
|
||||||
"net"
|
|
||||||
"os"
|
|
||||||
"path/filepath"
|
|
||||||
"reflect"
|
|
||||||
"regexp"
|
|
||||||
"runtime"
|
|
||||||
"strings"
|
|
||||||
"unicode"
|
|
||||||
"unicode/utf8"
|
|
||||||
|
|
||||||
"github.com/blang/semver"
|
|
||||||
"github.com/hashicorp/go-multierror"
|
|
||||||
rspec "github.com/opencontainers/runtime-spec/specs-go"
|
|
||||||
osFilepath "github.com/opencontainers/runtime-tools/filepath"
|
|
||||||
"github.com/sirupsen/logrus"
|
|
||||||
"github.com/syndtr/gocapability/capability"
|
|
||||||
|
|
||||||
"github.com/opencontainers/runtime-tools/specerror"
|
|
||||||
"github.com/xeipuuv/gojsonschema"
|
|
||||||
)
|
|
||||||
|
|
||||||
const specConfig = "config.json"
|
|
||||||
|
|
||||||
var (
|
|
||||||
// http://pubs.opengroup.org/onlinepubs/9699919799/functions/getrlimit.html
|
|
||||||
posixRlimits = []string{
|
|
||||||
"RLIMIT_AS",
|
|
||||||
"RLIMIT_CORE",
|
|
||||||
"RLIMIT_CPU",
|
|
||||||
"RLIMIT_DATA",
|
|
||||||
"RLIMIT_FSIZE",
|
|
||||||
"RLIMIT_NOFILE",
|
|
||||||
"RLIMIT_STACK",
|
|
||||||
}
|
|
||||||
|
|
||||||
// https://git.kernel.org/pub/scm/docs/man-pages/man-pages.git/tree/man2/getrlimit.2?h=man-pages-4.13
|
|
||||||
linuxRlimits = append(posixRlimits, []string{
|
|
||||||
"RLIMIT_MEMLOCK",
|
|
||||||
"RLIMIT_MSGQUEUE",
|
|
||||||
"RLIMIT_NICE",
|
|
||||||
"RLIMIT_NPROC",
|
|
||||||
"RLIMIT_RSS",
|
|
||||||
"RLIMIT_RTPRIO",
|
|
||||||
"RLIMIT_RTTIME",
|
|
||||||
"RLIMIT_SIGPENDING",
|
|
||||||
}...)
|
|
||||||
|
|
||||||
configSchemaTemplate = "https://raw.githubusercontent.com/opencontainers/runtime-spec/v%s/schema/config-schema.json"
|
|
||||||
)
|
|
||||||
|
|
||||||
// Validator represents a validator for runtime bundle
|
|
||||||
type Validator struct {
|
|
||||||
spec *rspec.Spec
|
|
||||||
bundlePath string
|
|
||||||
HostSpecific bool
|
|
||||||
platform string
|
|
||||||
}
|
|
||||||
|
|
||||||
// NewValidator creates a Validator
|
|
||||||
func NewValidator(spec *rspec.Spec, bundlePath string, hostSpecific bool, platform string) (Validator, error) {
|
|
||||||
if hostSpecific && platform != runtime.GOOS {
|
|
||||||
return Validator{}, fmt.Errorf("When hostSpecific is set, platform must be same as the host platform")
|
|
||||||
}
|
|
||||||
return Validator{
|
|
||||||
spec: spec,
|
|
||||||
bundlePath: bundlePath,
|
|
||||||
HostSpecific: hostSpecific,
|
|
||||||
platform: platform,
|
|
||||||
}, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// NewValidatorFromPath creates a Validator with specified bundle path
|
|
||||||
func NewValidatorFromPath(bundlePath string, hostSpecific bool, platform string) (Validator, error) {
|
|
||||||
if bundlePath == "" {
|
|
||||||
return Validator{}, fmt.Errorf("bundle path shouldn't be empty")
|
|
||||||
}
|
|
||||||
|
|
||||||
if _, err := os.Stat(bundlePath); err != nil {
|
|
||||||
return Validator{}, err
|
|
||||||
}
|
|
||||||
|
|
||||||
configPath := filepath.Join(bundlePath, specConfig)
|
|
||||||
content, err := ioutil.ReadFile(configPath)
|
|
||||||
if err != nil {
|
|
||||||
return Validator{}, specerror.NewError(specerror.ConfigInRootBundleDir, err, rspec.Version)
|
|
||||||
}
|
|
||||||
if !utf8.Valid(content) {
|
|
||||||
return Validator{}, fmt.Errorf("%q is not encoded in UTF-8", configPath)
|
|
||||||
}
|
|
||||||
var spec rspec.Spec
|
|
||||||
if err = json.Unmarshal(content, &spec); err != nil {
|
|
||||||
return Validator{}, err
|
|
||||||
}
|
|
||||||
|
|
||||||
return NewValidator(&spec, bundlePath, hostSpecific, platform)
|
|
||||||
}
|
|
||||||
|
|
||||||
// CheckAll checks all parts of runtime bundle
|
|
||||||
func (v *Validator) CheckAll() error {
|
|
||||||
var errs *multierror.Error
|
|
||||||
errs = multierror.Append(errs, v.CheckJSONSchema())
|
|
||||||
errs = multierror.Append(errs, v.CheckPlatform())
|
|
||||||
errs = multierror.Append(errs, v.CheckRoot())
|
|
||||||
errs = multierror.Append(errs, v.CheckMandatoryFields())
|
|
||||||
errs = multierror.Append(errs, v.CheckSemVer())
|
|
||||||
errs = multierror.Append(errs, v.CheckMounts())
|
|
||||||
errs = multierror.Append(errs, v.CheckProcess())
|
|
||||||
errs = multierror.Append(errs, v.CheckLinux())
|
|
||||||
errs = multierror.Append(errs, v.CheckAnnotations())
|
|
||||||
if v.platform == "linux" || v.platform == "solaris" {
|
|
||||||
errs = multierror.Append(errs, v.CheckHooks())
|
|
||||||
}
|
|
||||||
|
|
||||||
return errs.ErrorOrNil()
|
|
||||||
}
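A sketch of driving the validator end to end; the bundle path is hypothetical and host-specific checks are enabled for a linux target:

// Illustrative sketch, not part of the vendored file.
package main

import (
	"fmt"

	"github.com/opencontainers/runtime-tools/validate"
)

func main() {
	// "/tmp/bundle" is a hypothetical bundle directory containing config.json.
	v, err := validate.NewValidatorFromPath("/tmp/bundle", true, "linux")
	if err != nil {
		panic(err)
	}
	if err := v.CheckAll(); err != nil {
		// The returned error aggregates every failed check (go-multierror).
		fmt.Println(err)
	}
}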
|
|
||||||
|
|
||||||
// JSONSchemaURL returns the URL for the JSON Schema specifying the
|
|
||||||
// configuration format. It consumes configSchemaTemplate, but we
|
|
||||||
// provide it as a function to isolate consumers from inconsistent
|
|
||||||
// naming as runtime-spec evolves.
|
|
||||||
func JSONSchemaURL(version string) (url string, err error) {
|
|
||||||
ver, err := semver.Parse(version)
|
|
||||||
if err != nil {
|
|
||||||
return "", specerror.NewError(specerror.SpecVersionInSemVer, err, rspec.Version)
|
|
||||||
}
|
|
||||||
configRenamedToConfigSchemaVersion, err := semver.Parse("1.0.0-rc2") // config.json became config-schema.json in 1.0.0-rc2
|
|
||||||
if ver.Compare(configRenamedToConfigSchemaVersion) == -1 {
|
|
||||||
return "", fmt.Errorf("unsupported configuration version (older than %s)", configRenamedToConfigSchemaVersion)
|
|
||||||
}
|
|
||||||
return fmt.Sprintf(configSchemaTemplate, version), nil
|
|
||||||
}
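For example, resolving the schema URL for a hypothetical 1.0.1 configuration:

// Illustrative sketch, not part of the vendored file.
package main

import (
	"fmt"

	"github.com/opencontainers/runtime-tools/validate"
)

func main() {
	url, err := validate.JSONSchemaURL("1.0.1")
	if err != nil {
		panic(err)
	}
	// Prints:
	// https://raw.githubusercontent.com/opencontainers/runtime-spec/v1.0.1/schema/config-schema.json
	fmt.Println(url)
}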
|
|
||||||
|
|
||||||
// CheckJSONSchema validates the configuration against the
|
|
||||||
// runtime-spec JSON Schema, using the version of the schema that
|
|
||||||
// matches the configuration's declared version.
|
|
||||||
func (v *Validator) CheckJSONSchema() (errs error) {
|
|
||||||
logrus.Debugf("check JSON schema")
|
|
||||||
|
|
||||||
url, err := JSONSchemaURL(v.spec.Version)
|
|
||||||
if err != nil {
|
|
||||||
errs = multierror.Append(errs, err)
|
|
||||||
return errs
|
|
||||||
}
|
|
||||||
|
|
||||||
schemaLoader := gojsonschema.NewReferenceLoader(url)
|
|
||||||
documentLoader := gojsonschema.NewGoLoader(v.spec)
|
|
||||||
result, err := gojsonschema.Validate(schemaLoader, documentLoader)
|
|
||||||
if err != nil {
|
|
||||||
errs = multierror.Append(errs, err)
|
|
||||||
return errs
|
|
||||||
}
|
|
||||||
|
|
||||||
if !result.Valid() {
|
|
||||||
for _, resultError := range result.Errors() {
|
|
||||||
errs = multierror.Append(errs, errors.New(resultError.String()))
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
return errs
|
|
||||||
}
|
|
||||||
|
|
||||||
// CheckRoot checks status of v.spec.Root
|
|
||||||
func (v *Validator) CheckRoot() (errs error) {
|
|
||||||
logrus.Debugf("check root")
|
|
||||||
|
|
||||||
if v.platform == "windows" && v.spec.Windows != nil {
|
|
||||||
if v.spec.Windows.HyperV != nil {
|
|
||||||
if v.spec.Root != nil {
|
|
||||||
errs = multierror.Append(errs,
|
|
||||||
specerror.NewError(specerror.RootOnHyperVNotSet, fmt.Errorf("for Hyper-V containers, Root must not be set"), rspec.Version))
|
|
||||||
}
|
|
||||||
return
|
|
||||||
} else if v.spec.Root == nil {
|
|
||||||
errs = multierror.Append(errs,
|
|
||||||
specerror.NewError(specerror.RootOnWindowsRequired, fmt.Errorf("on Windows, for Windows Server Containers, this field is REQUIRED"), rspec.Version))
|
|
||||||
return
|
|
||||||
}
|
|
||||||
} else if v.platform != "windows" && v.spec.Root == nil {
|
|
||||||
errs = multierror.Append(errs,
|
|
||||||
specerror.NewError(specerror.RootOnNonWindowsRequired, fmt.Errorf("on all other platforms, this field is REQUIRED"), rspec.Version))
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
if v.platform == "windows" {
|
|
||||||
matched, err := regexp.MatchString(`\\\\[?]\\Volume[{][a-fA-F0-9]{8}-[a-fA-F0-9]{4}-[a-fA-F0-9]{4}-[a-fA-F0-9]{4}-[a-fA-F0-9]{12}[}]\\`, v.spec.Root.Path)
|
|
||||||
if err != nil {
|
|
||||||
errs = multierror.Append(errs, err)
|
|
||||||
} else if !matched {
|
|
||||||
errs = multierror.Append(errs,
|
|
||||||
specerror.NewError(specerror.RootPathOnWindowsGUID, fmt.Errorf("root.path is %q, but it MUST be a volume GUID path when target platform is windows", v.spec.Root.Path), rspec.Version))
|
|
||||||
}
|
|
||||||
|
|
||||||
if v.spec.Root.Readonly {
|
|
||||||
errs = multierror.Append(errs,
|
|
||||||
specerror.NewError(specerror.RootReadonlyOnWindowsFalse, fmt.Errorf("root.readonly field MUST be omitted or false when target platform is windows"), rspec.Version))
|
|
||||||
}
|
|
||||||
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
absBundlePath, err := filepath.Abs(v.bundlePath)
|
|
||||||
if err != nil {
|
|
||||||
errs = multierror.Append(errs, fmt.Errorf("unable to convert %q to an absolute path", v.bundlePath))
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
if filepath.Base(v.spec.Root.Path) != "rootfs" {
|
|
||||||
errs = multierror.Append(errs,
|
|
||||||
specerror.NewError(specerror.RootPathOnPosixConvention, fmt.Errorf("path name should be the conventional 'rootfs'"), rspec.Version))
|
|
||||||
}
|
|
||||||
|
|
||||||
var rootfsPath string
|
|
||||||
var absRootPath string
|
|
||||||
if filepath.IsAbs(v.spec.Root.Path) {
|
|
||||||
rootfsPath = v.spec.Root.Path
|
|
||||||
absRootPath = filepath.Clean(rootfsPath)
|
|
||||||
} else {
|
|
||||||
var err error
|
|
||||||
rootfsPath = filepath.Join(v.bundlePath, v.spec.Root.Path)
|
|
||||||
absRootPath, err = filepath.Abs(rootfsPath)
|
|
||||||
if err != nil {
|
|
||||||
errs = multierror.Append(errs, fmt.Errorf("unable to convert %q to an absolute path", rootfsPath))
|
|
||||||
return
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
if fi, err := os.Stat(rootfsPath); err != nil {
|
|
||||||
errs = multierror.Append(errs,
|
|
||||||
specerror.NewError(specerror.RootPathExist, fmt.Errorf("cannot find the root path %q", rootfsPath), rspec.Version))
|
|
||||||
} else if !fi.IsDir() {
|
|
||||||
errs = multierror.Append(errs,
|
|
||||||
specerror.NewError(specerror.RootPathExist, fmt.Errorf("root.path %q is not a directory", rootfsPath), rspec.Version))
|
|
||||||
}
|
|
||||||
|
|
||||||
rootParent := filepath.Dir(absRootPath)
|
|
||||||
if absRootPath == string(filepath.Separator) || rootParent != absBundlePath {
|
|
||||||
errs = multierror.Append(errs,
|
|
||||||
specerror.NewError(specerror.ArtifactsInSingleDir, fmt.Errorf("root.path is %q, but it MUST be a child of %q", v.spec.Root.Path, absBundlePath), rspec.Version))
|
|
||||||
}
|
|
||||||
|
|
||||||
return
|
|
||||||
}
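The Windows branch above accepts only volume GUID paths for root.path. A standalone sketch of the same pattern, with a made-up GUID purely to show the expected shape:

// Illustrative sketch, not part of the vendored file.
package main

import (
	"fmt"
	"regexp"
)

func main() {
	// Same pattern CheckRoot applies to root.path when the target platform is windows.
	guidPath := regexp.MustCompile(`\\\\[?]\\Volume[{][a-fA-F0-9]{8}-[a-fA-F0-9]{4}-[a-fA-F0-9]{4}-[a-fA-F0-9]{4}-[a-fA-F0-9]{12}[}]\\`)

	fmt.Println(guidPath.MatchString(`\\?\Volume{ac026f3a-52a8-4b3c-a4bc-9d6c1b5e8f01}\`)) // true (hypothetical GUID)
	fmt.Println(guidPath.MatchString(`C:\rootfs`))                                         // false
}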
|
|
||||||
|
|
||||||
// CheckSemVer checks v.spec.Version
|
|
||||||
func (v *Validator) CheckSemVer() (errs error) {
|
|
||||||
logrus.Debugf("check semver")
|
|
||||||
|
|
||||||
version := v.spec.Version
|
|
||||||
_, err := semver.Parse(version)
|
|
||||||
if err != nil {
|
|
||||||
errs = multierror.Append(errs,
|
|
||||||
specerror.NewError(specerror.SpecVersionInSemVer, fmt.Errorf("%q is not valid SemVer: %s", version, err.Error()), rspec.Version))
|
|
||||||
}
|
|
||||||
if version != rspec.Version {
|
|
||||||
errs = multierror.Append(errs, fmt.Errorf("validate currently only handles version %s, but the supplied configuration targets %s", rspec.Version, version))
|
|
||||||
}
|
|
||||||
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
// CheckHooks check v.spec.Hooks
|
|
||||||
func (v *Validator) CheckHooks() (errs error) {
|
|
||||||
logrus.Debugf("check hooks")
|
|
||||||
|
|
||||||
if v.platform != "linux" && v.platform != "solaris" {
|
|
||||||
errs = multierror.Append(errs, fmt.Errorf("For %q platform, the configuration structure does not support hooks", v.platform))
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
if v.spec.Hooks != nil {
|
|
||||||
errs = multierror.Append(errs, v.checkEventHooks("prestart", v.spec.Hooks.Prestart, v.HostSpecific))
|
|
||||||
errs = multierror.Append(errs, v.checkEventHooks("poststart", v.spec.Hooks.Poststart, v.HostSpecific))
|
|
||||||
errs = multierror.Append(errs, v.checkEventHooks("poststop", v.spec.Hooks.Poststop, v.HostSpecific))
|
|
||||||
}
|
|
||||||
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
func (v *Validator) checkEventHooks(hookType string, hooks []rspec.Hook, hostSpecific bool) (errs error) {
|
|
||||||
for i, hook := range hooks {
|
|
||||||
if !osFilepath.IsAbs(v.platform, hook.Path) {
|
|
||||||
errs = multierror.Append(errs,
|
|
||||||
specerror.NewError(
|
|
||||||
specerror.PosixHooksPathAbs,
|
|
||||||
fmt.Errorf("hooks.%s[%d].path %v: is not absolute path",
|
|
||||||
hookType, i, hook.Path),
|
|
||||||
rspec.Version))
|
|
||||||
}
|
|
||||||
|
|
||||||
if hostSpecific {
|
|
||||||
fi, err := os.Stat(hook.Path)
|
|
||||||
if err != nil {
|
|
||||||
errs = multierror.Append(errs, fmt.Errorf("cannot find %s hook: %v", hookType, hook.Path))
|
|
||||||
} else if fi.Mode()&0111 == 0 {
|
|
||||||
errs = multierror.Append(errs, fmt.Errorf("the %s hook %v: is not executable", hookType, hook.Path))
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
for _, env := range hook.Env {
|
|
||||||
if !envValid(env) {
|
|
||||||
errs = multierror.Append(errs, fmt.Errorf("env %q for hook %v is in the invalid form", env, hook.Path))
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
// CheckProcess checks v.spec.Process
|
|
||||||
func (v *Validator) CheckProcess() (errs error) {
|
|
||||||
logrus.Debugf("check process")
|
|
||||||
|
|
||||||
if v.spec.Process == nil {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
process := v.spec.Process
|
|
||||||
if !osFilepath.IsAbs(v.platform, process.Cwd) {
|
|
||||||
errs = multierror.Append(errs,
|
|
||||||
specerror.NewError(
|
|
||||||
specerror.ProcCwdAbs,
|
|
||||||
fmt.Errorf("cwd %q is not an absolute path", process.Cwd),
|
|
||||||
rspec.Version))
|
|
||||||
}
|
|
||||||
|
|
||||||
for _, env := range process.Env {
|
|
||||||
if !envValid(env) {
|
|
||||||
errs = multierror.Append(errs, fmt.Errorf("env %q should be in the form of 'key=value'. The left hand side must consist solely of letters, digits, and underscores '_'", env))
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
if len(process.Args) == 0 {
|
|
||||||
errs = multierror.Append(errs,
|
|
||||||
specerror.NewError(
|
|
||||||
specerror.ProcArgsOneEntryRequired,
|
|
||||||
fmt.Errorf("args must not be empty"),
|
|
||||||
rspec.Version))
|
|
||||||
} else {
|
|
||||||
if filepath.IsAbs(process.Args[0]) && v.spec.Root != nil {
|
|
||||||
var rootfsPath string
|
|
||||||
if filepath.IsAbs(v.spec.Root.Path) {
|
|
||||||
rootfsPath = v.spec.Root.Path
|
|
||||||
} else {
|
|
||||||
rootfsPath = filepath.Join(v.bundlePath, v.spec.Root.Path)
|
|
||||||
}
|
|
||||||
absPath := filepath.Join(rootfsPath, process.Args[0])
|
|
||||||
fileinfo, err := os.Stat(absPath)
|
|
||||||
if os.IsNotExist(err) {
|
|
||||||
logrus.Warnf("executable %q is not available in rootfs currently", process.Args[0])
|
|
||||||
} else if err != nil {
|
|
||||||
errs = multierror.Append(errs, err)
|
|
||||||
} else {
|
|
||||||
m := fileinfo.Mode()
|
|
||||||
if m.IsDir() || m&0111 == 0 {
|
|
||||||
errs = multierror.Append(errs, fmt.Errorf("arg %q is not executable", process.Args[0]))
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
if v.platform == "linux" || v.platform == "solaris" {
|
|
||||||
errs = multierror.Append(errs, v.CheckRlimits())
|
|
||||||
}
|
|
||||||
|
|
||||||
if v.platform == "linux" {
|
|
||||||
if v.spec.Process.Capabilities != nil {
|
|
||||||
errs = multierror.Append(errs, v.CheckCapabilities())
|
|
||||||
}
|
|
||||||
|
|
||||||
if len(process.ApparmorProfile) > 0 {
|
|
||||||
profilePath := filepath.Join(v.bundlePath, v.spec.Root.Path, "/etc/apparmor.d", process.ApparmorProfile)
|
|
||||||
_, err := os.Stat(profilePath)
|
|
||||||
if err != nil {
|
|
||||||
errs = multierror.Append(errs, err)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
// CheckCapabilities checks v.spec.Process.Capabilities
|
|
||||||
func (v *Validator) CheckCapabilities() (errs error) {
|
|
||||||
if v.platform != "linux" {
|
|
||||||
errs = multierror.Append(errs, fmt.Errorf("For %q platform, the configuration structure does not support process.capabilities", v.platform))
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
process := v.spec.Process
|
|
||||||
var effective, permitted, inheritable, ambient bool
|
|
||||||
caps := make(map[string][]string)
|
|
||||||
|
|
||||||
for _, cap := range process.Capabilities.Bounding {
|
|
||||||
caps[cap] = append(caps[cap], "bounding")
|
|
||||||
}
|
|
||||||
for _, cap := range process.Capabilities.Effective {
|
|
||||||
caps[cap] = append(caps[cap], "effective")
|
|
||||||
}
|
|
||||||
for _, cap := range process.Capabilities.Inheritable {
|
|
||||||
caps[cap] = append(caps[cap], "inheritable")
|
|
||||||
}
|
|
||||||
for _, cap := range process.Capabilities.Permitted {
|
|
||||||
caps[cap] = append(caps[cap], "permitted")
|
|
||||||
}
|
|
||||||
for _, cap := range process.Capabilities.Ambient {
|
|
||||||
caps[cap] = append(caps[cap], "ambient")
|
|
||||||
}
|
|
||||||
|
|
||||||
for capability, owns := range caps {
|
|
||||||
if err := CapValid(capability, v.HostSpecific); err != nil {
|
|
||||||
errs = multierror.Append(errs, fmt.Errorf("capability %q is not valid, man capabilities(7)", capability))
|
|
||||||
}
|
|
||||||
|
|
||||||
effective, permitted, ambient, inheritable = false, false, false, false
|
|
||||||
for _, set := range owns {
|
|
||||||
if set == "effective" {
|
|
||||||
effective = true
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
if set == "inheritable" {
|
|
||||||
inheritable = true
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
if set == "permitted" {
|
|
||||||
permitted = true
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
if set == "ambient" {
|
|
||||||
ambient = true
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
}
|
|
||||||
if effective && !permitted {
|
|
||||||
errs = multierror.Append(errs, fmt.Errorf("effective capability %q is not allowed, as it's not permitted", capability))
|
|
||||||
}
|
|
||||||
if ambient && !(permitted && inheritable) {
|
|
||||||
errs = multierror.Append(errs, fmt.Errorf("ambient capability %q is not allowed, as it's not permitted and inheribate", capability))
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
// CheckRlimits checks v.spec.Process.Rlimits
|
|
||||||
func (v *Validator) CheckRlimits() (errs error) {
|
|
||||||
if v.platform != "linux" && v.platform != "solaris" {
|
|
||||||
errs = multierror.Append(errs, fmt.Errorf("For %q platform, the configuration structure does not support process.rlimits", v.platform))
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
process := v.spec.Process
|
|
||||||
for index, rlimit := range process.Rlimits {
|
|
||||||
for i := index + 1; i < len(process.Rlimits); i++ {
|
|
||||||
if process.Rlimits[index].Type == process.Rlimits[i].Type {
|
|
||||||
errs = multierror.Append(errs,
|
|
||||||
specerror.NewError(
|
|
||||||
specerror.PosixProcRlimitsErrorOnDup,
|
|
||||||
fmt.Errorf("rlimit can not contain the same type %q",
|
|
||||||
process.Rlimits[index].Type),
|
|
||||||
rspec.Version))
|
|
||||||
}
|
|
||||||
}
|
|
||||||
errs = multierror.Append(errs, v.rlimitValid(rlimit))
|
|
||||||
}
|
|
||||||
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
func supportedMountTypes(OS string, hostSpecific bool) (map[string]bool, error) {
|
|
||||||
supportedTypes := make(map[string]bool)
|
|
||||||
|
|
||||||
if OS != "linux" && OS != "windows" {
|
|
||||||
logrus.Warnf("%v is not supported to check mount type", OS)
|
|
||||||
return nil, nil
|
|
||||||
} else if OS == "windows" {
|
|
||||||
supportedTypes["ntfs"] = true
|
|
||||||
return supportedTypes, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
if hostSpecific {
|
|
||||||
f, err := os.Open("/proc/filesystems")
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
defer f.Close()
|
|
||||||
|
|
||||||
s := bufio.NewScanner(f)
|
|
||||||
for s.Scan() {
|
|
||||||
if err := s.Err(); err != nil {
|
|
||||||
return supportedTypes, err
|
|
||||||
}
|
|
||||||
|
|
||||||
text := s.Text()
|
|
||||||
parts := strings.Split(text, "\t")
|
|
||||||
if len(parts) > 1 {
|
|
||||||
supportedTypes[parts[1]] = true
|
|
||||||
} else {
|
|
||||||
supportedTypes[parts[0]] = true
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
supportedTypes["bind"] = true
|
|
||||||
|
|
||||||
return supportedTypes, nil
|
|
||||||
}
|
|
||||||
logrus.Warn("Checking linux mount types without --host-specific is not supported yet")
|
|
||||||
return nil, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// CheckMounts checks v.spec.Mounts
|
|
||||||
func (v *Validator) CheckMounts() (errs error) {
|
|
||||||
logrus.Debugf("check mounts")
|
|
||||||
|
|
||||||
supportedTypes, err := supportedMountTypes(v.platform, v.HostSpecific)
|
|
||||||
if err != nil {
|
|
||||||
errs = multierror.Append(errs, err)
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
for i, mountA := range v.spec.Mounts {
|
|
||||||
if supportedTypes != nil && !supportedTypes[mountA.Type] {
|
|
||||||
errs = multierror.Append(errs, fmt.Errorf("unsupported mount type %q", mountA.Type))
|
|
||||||
}
|
|
||||||
if !osFilepath.IsAbs(v.platform, mountA.Destination) {
|
|
||||||
errs = multierror.Append(errs,
|
|
||||||
specerror.NewError(
|
|
||||||
specerror.MountsDestAbs,
|
|
||||||
fmt.Errorf("mounts[%d].destination %q is not absolute",
|
|
||||||
i,
|
|
||||||
mountA.Destination),
|
|
||||||
rspec.Version))
|
|
||||||
}
|
|
||||||
for j, mountB := range v.spec.Mounts {
|
|
||||||
if i == j {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
// whether B.Destination is nested within A.Destination
|
|
||||||
nested, err := osFilepath.IsAncestor(v.platform, mountA.Destination, mountB.Destination, ".")
|
|
||||||
if err != nil {
|
|
||||||
errs = multierror.Append(errs, err)
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
if nested {
|
|
||||||
if v.platform == "windows" && i < j {
|
|
||||||
errs = multierror.Append(errs,
|
|
||||||
specerror.NewError(
|
|
||||||
specerror.MountsDestOnWindowsNotNested,
|
|
||||||
fmt.Errorf("on Windows, %v nested within %v is forbidden",
|
|
||||||
mountB.Destination, mountA.Destination),
|
|
||||||
rspec.Version))
|
|
||||||
}
|
|
||||||
if i > j {
|
|
||||||
logrus.Warnf("%v will be covered by %v", mountB.Destination, mountA.Destination)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
// CheckPlatform checks v.platform
|
|
||||||
func (v *Validator) CheckPlatform() (errs error) {
|
|
||||||
logrus.Debugf("check platform")
|
|
||||||
|
|
||||||
if v.platform != "linux" && v.platform != "solaris" && v.platform != "windows" {
|
|
||||||
errs = multierror.Append(errs, fmt.Errorf("platform %q is not supported", v.platform))
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
if v.HostSpecific && v.platform != runtime.GOOS {
|
|
||||||
errs = multierror.Append(errs, fmt.Errorf("platform %q differs from the host %q, skipping host-specific checks", v.platform, runtime.GOOS))
|
|
||||||
v.HostSpecific = false
|
|
||||||
}
|
|
||||||
|
|
||||||
if v.platform == "windows" {
|
|
||||||
if v.spec.Windows == nil {
|
|
||||||
errs = multierror.Append(errs,
|
|
||||||
specerror.NewError(
|
|
||||||
specerror.PlatformSpecConfOnWindowsSet,
|
|
||||||
fmt.Errorf("'windows' MUST be set when platform is `windows`"),
|
|
||||||
rspec.Version))
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
// CheckLinuxResources checks v.spec.Linux.Resources
|
|
||||||
func (v *Validator) CheckLinuxResources() (errs error) {
|
|
||||||
logrus.Debugf("check linux resources")
|
|
||||||
|
|
||||||
r := v.spec.Linux.Resources
|
|
||||||
if r.Memory != nil {
|
|
||||||
if r.Memory.Limit != nil && r.Memory.Swap != nil && uint64(*r.Memory.Limit) > uint64(*r.Memory.Swap) {
|
|
||||||
errs = multierror.Append(errs, fmt.Errorf("minimum memoryswap should be larger than memory limit"))
|
|
||||||
}
|
|
||||||
if r.Memory.Limit != nil && r.Memory.Reservation != nil && uint64(*r.Memory.Reservation) > uint64(*r.Memory.Limit) {
|
|
||||||
errs = multierror.Append(errs, fmt.Errorf("minimum memory limit should be larger than memory reservation"))
|
|
||||||
}
|
|
||||||
}
|
|
||||||
if r.Network != nil && v.HostSpecific {
|
|
||||||
var exist bool
|
|
||||||
interfaces, err := net.Interfaces()
|
|
||||||
if err != nil {
|
|
||||||
errs = multierror.Append(errs, err)
|
|
||||||
return
|
|
||||||
}
|
|
||||||
for _, prio := range r.Network.Priorities {
|
|
||||||
exist = false
|
|
||||||
for _, ni := range interfaces {
|
|
||||||
if prio.Name == ni.Name {
|
|
||||||
exist = true
|
|
||||||
break
|
|
||||||
}
|
|
||||||
}
|
|
||||||
if !exist {
|
|
||||||
errs = multierror.Append(errs, fmt.Errorf("interface %s does not exist currently", prio.Name))
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
for index := 0; index < len(r.Devices); index++ {
|
|
||||||
switch r.Devices[index].Type {
|
|
||||||
case "a", "b", "c", "":
|
|
||||||
default:
|
|
||||||
errs = multierror.Append(errs, fmt.Errorf("type of devices %s is invalid", r.Devices[index].Type))
|
|
||||||
}
|
|
||||||
|
|
||||||
access := []byte(r.Devices[index].Access)
|
|
||||||
for i := 0; i < len(access); i++ {
|
|
||||||
switch access[i] {
|
|
||||||
case 'r', 'w', 'm':
|
|
||||||
default:
|
|
||||||
errs = multierror.Append(errs, fmt.Errorf("access %s is invalid", r.Devices[index].Access))
|
|
||||||
return
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
if r.BlockIO != nil && r.BlockIO.WeightDevice != nil {
|
|
||||||
for i, weightDevice := range r.BlockIO.WeightDevice {
|
|
||||||
if weightDevice.Weight == nil && weightDevice.LeafWeight == nil {
|
|
||||||
errs = multierror.Append(errs,
|
|
||||||
specerror.NewError(
|
|
||||||
specerror.BlkIOWeightOrLeafWeightExist,
|
|
||||||
fmt.Errorf("linux.resources.blockIO.weightDevice[%d] specifies neither weight nor leafWeight", i),
|
|
||||||
rspec.Version))
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
// CheckAnnotations checks v.spec.Annotations
|
|
||||||
func (v *Validator) CheckAnnotations() (errs error) {
|
|
||||||
logrus.Debugf("check annotations")
|
|
||||||
|
|
||||||
reversedDomain := regexp.MustCompile(`^[A-Za-z]{2,6}(\.[A-Za-z0-9-]{1,63})+$`)
|
|
||||||
for key := range v.spec.Annotations {
|
|
||||||
if strings.HasPrefix(key, "org.opencontainers") {
|
|
||||||
errs = multierror.Append(errs,
|
|
||||||
specerror.NewError(
|
|
||||||
specerror.AnnotationsKeyReservedNS,
|
|
||||||
fmt.Errorf("key %q is reserved", key),
|
|
||||||
rspec.Version))
|
|
||||||
}
|
|
||||||
|
|
||||||
if !reversedDomain.MatchString(key) {
|
|
||||||
errs = multierror.Append(errs,
|
|
||||||
specerror.NewError(
|
|
||||||
specerror.AnnotationsKeyReversedDomain,
|
|
||||||
fmt.Errorf("key %q SHOULD be named using a reverse domain notation", key),
|
|
||||||
rspec.Version))
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
return
|
|
||||||
}
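A small sketch of the two key checks above (reverse-domain shape and the reserved org.opencontainers namespace), using example keys:

// Illustrative sketch, not part of the vendored file.
package main

import (
	"fmt"
	"regexp"
	"strings"
)

func main() {
	// Same reverse-domain pattern CheckAnnotations applies to annotation keys.
	reversedDomain := regexp.MustCompile(`^[A-Za-z]{2,6}(\.[A-Za-z0-9-]{1,63})+$`)

	for _, key := range []string{"com.example.myKey", "org.opencontainers.foo", "justakey"} {
		fmt.Println(key,
			"reverse-domain:", reversedDomain.MatchString(key),
			"reserved:", strings.HasPrefix(key, "org.opencontainers"))
	}
}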
|
|
||||||
|
|
||||||
// CapValid checks whether a capability is valid
|
|
||||||
func CapValid(c string, hostSpecific bool) error {
|
|
||||||
isValid := false
|
|
||||||
|
|
||||||
if !strings.HasPrefix(c, "CAP_") {
|
|
||||||
return fmt.Errorf("capability %s must start with CAP_", c)
|
|
||||||
}
|
|
||||||
for _, cap := range capability.List() {
|
|
||||||
if c == fmt.Sprintf("CAP_%s", strings.ToUpper(cap.String())) {
|
|
||||||
if hostSpecific && cap > LastCap() {
|
|
||||||
return fmt.Errorf("%s is not supported on the current host", c)
|
|
||||||
}
|
|
||||||
isValid = true
|
|
||||||
break
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
if !isValid {
|
|
||||||
return fmt.Errorf("invalid capability: %s", c)
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func envValid(env string) bool {
|
|
||||||
items := strings.Split(env, "=")
|
|
||||||
if len(items) < 2 {
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
for i, ch := range strings.TrimSpace(items[0]) {
|
|
||||||
if !unicode.IsDigit(ch) && !unicode.IsLetter(ch) && ch != '_' {
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
if i == 0 && unicode.IsDigit(ch) {
|
|
||||||
logrus.Warnf("Env %v: variable name beginning with digit is not recommended.", env)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
|
|
||||||
func (v *Validator) rlimitValid(rlimit rspec.POSIXRlimit) (errs error) {
|
|
||||||
if rlimit.Hard < rlimit.Soft {
|
|
||||||
errs = multierror.Append(errs, fmt.Errorf("hard limit of rlimit %s should not be less than soft limit", rlimit.Type))
|
|
||||||
}
|
|
||||||
|
|
||||||
if v.platform == "linux" {
|
|
||||||
for _, val := range linuxRlimits {
|
|
||||||
if val == rlimit.Type {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
}
|
|
||||||
errs = multierror.Append(errs, specerror.NewError(specerror.PosixProcRlimitsTypeValueError, fmt.Errorf("rlimit type %q may not be valid", rlimit.Type), v.spec.Version))
|
|
||||||
} else if v.platform == "solaris" {
|
|
||||||
for _, val := range posixRlimits {
|
|
||||||
if val == rlimit.Type {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
}
|
|
||||||
errs = multierror.Append(errs, specerror.NewError(specerror.PosixProcRlimitsTypeValueError, fmt.Errorf("rlimit type %q may not be valid", rlimit.Type), v.spec.Version))
|
|
||||||
} else {
|
|
||||||
logrus.Warnf("process.rlimits validation not yet implemented for platform %q", v.platform)
|
|
||||||
}
|
|
||||||
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
func isStruct(t reflect.Type) bool {
|
|
||||||
return t.Kind() == reflect.Struct
|
|
||||||
}
|
|
||||||
|
|
||||||
func isStructPtr(t reflect.Type) bool {
|
|
||||||
return t.Kind() == reflect.Ptr && t.Elem().Kind() == reflect.Struct
|
|
||||||
}
|
|
||||||
|
|
||||||
func checkMandatoryUnit(field reflect.Value, tagField reflect.StructField, parent string) (errs error) {
|
|
||||||
mandatory := !strings.Contains(tagField.Tag.Get("json"), "omitempty")
|
|
||||||
switch field.Kind() {
|
|
||||||
case reflect.Ptr:
|
|
||||||
if mandatory && field.IsNil() {
|
|
||||||
errs = multierror.Append(errs, fmt.Errorf("'%s.%s' should not be empty", parent, tagField.Name))
|
|
||||||
}
|
|
||||||
case reflect.String:
|
|
||||||
if mandatory && (field.Len() == 0) {
|
|
||||||
errs = multierror.Append(errs, fmt.Errorf("'%s.%s' should not be empty", parent, tagField.Name))
|
|
||||||
}
|
|
||||||
case reflect.Slice:
|
|
||||||
if mandatory && (field.IsNil() || field.Len() == 0) {
|
|
||||||
errs = multierror.Append(errs, fmt.Errorf("'%s.%s' should not be empty", parent, tagField.Name))
|
|
||||||
return
|
|
||||||
}
|
|
||||||
for index := 0; index < field.Len(); index++ {
|
|
||||||
mValue := field.Index(index)
|
|
||||||
if mValue.CanInterface() {
|
|
||||||
errs = multierror.Append(errs, checkMandatory(mValue.Interface()))
|
|
||||||
}
|
|
||||||
}
|
|
||||||
case reflect.Map:
|
|
||||||
if mandatory && (field.IsNil() || field.Len() == 0) {
|
|
||||||
errs = multierror.Append(errs, fmt.Errorf("'%s.%s' should not be empty", parent, tagField.Name))
|
|
||||||
return
|
|
||||||
}
|
|
||||||
keys := field.MapKeys()
|
|
||||||
for index := 0; index < len(keys); index++ {
|
|
||||||
mValue := field.MapIndex(keys[index])
|
|
||||||
if mValue.CanInterface() {
|
|
||||||
errs = multierror.Append(errs, checkMandatory(mValue.Interface()))
|
|
||||||
}
|
|
||||||
}
|
|
||||||
default:
|
|
||||||
}
|
|
||||||
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
func checkMandatory(obj interface{}) (errs error) {
|
|
||||||
objT := reflect.TypeOf(obj)
|
|
||||||
objV := reflect.ValueOf(obj)
|
|
||||||
if isStructPtr(objT) {
|
|
||||||
objT = objT.Elem()
|
|
||||||
objV = objV.Elem()
|
|
||||||
} else if !isStruct(objT) {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
for i := 0; i < objT.NumField(); i++ {
|
|
||||||
t := objT.Field(i).Type
|
|
||||||
if isStructPtr(t) && objV.Field(i).IsNil() {
|
|
||||||
if !strings.Contains(objT.Field(i).Tag.Get("json"), "omitempty") {
|
|
||||||
errs = multierror.Append(errs, fmt.Errorf("'%s.%s' should not be empty", objT.Name(), objT.Field(i).Name))
|
|
||||||
}
|
|
||||||
} else if (isStruct(t) || isStructPtr(t)) && objV.Field(i).CanInterface() {
|
|
||||||
errs = multierror.Append(errs, checkMandatory(objV.Field(i).Interface()))
|
|
||||||
} else {
|
|
||||||
errs = multierror.Append(errs, checkMandatoryUnit(objV.Field(i), objT.Field(i), objT.Name()))
|
|
||||||
}
|
|
||||||
|
|
||||||
}
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
// CheckMandatoryFields checks mandatory field of container's config file
|
|
||||||
func (v *Validator) CheckMandatoryFields() error {
|
|
||||||
logrus.Debugf("check mandatory fields")
|
|
||||||
|
|
||||||
if v.spec == nil {
|
|
||||||
return fmt.Errorf("Spec can't be nil")
|
|
||||||
}
|
|
||||||
|
|
||||||
return checkMandatory(v.spec)
|
|
||||||
}
|

vendor/github.com/opencontainers/runtime-tools/validate/validate_linux.go (generated, vendored, 230 lines)
@@ -1,230 +0,0 @@
// +build linux

package validate

import (
	"errors"
	"fmt"
	"os"
	"path/filepath"
	"strings"
	"syscall"

	"github.com/syndtr/gocapability/capability"

	multierror "github.com/hashicorp/go-multierror"
	rspec "github.com/opencontainers/runtime-spec/specs-go"
	osFilepath "github.com/opencontainers/runtime-tools/filepath"
	"github.com/opencontainers/runtime-tools/specerror"
	"github.com/sirupsen/logrus"
)

// LastCap return last cap of system
func LastCap() capability.Cap {
	last := capability.CAP_LAST_CAP
	// hack for RHEL6 which has no /proc/sys/kernel/cap_last_cap
	if last == capability.Cap(63) {
		last = capability.CAP_BLOCK_SUSPEND
	}

	return last
}

func deviceValid(d rspec.LinuxDevice) bool {
	switch d.Type {
	case "b", "c", "u":
		if d.Major <= 0 || d.Minor <= 0 {
			return false
		}
	case "p":
		if d.Major != 0 || d.Minor != 0 {
			return false
		}
	default:
		return false
	}
	return true
}

// CheckLinux checks v.spec.Linux
func (v *Validator) CheckLinux() (errs error) {
	logrus.Debugf("check linux")

	if v.spec.Linux == nil {
		return
	}

	var nsTypeList = map[rspec.LinuxNamespaceType]struct {
		num      int
		newExist bool
	}{
		rspec.PIDNamespace:     {0, false},
		rspec.NetworkNamespace: {0, false},
		rspec.MountNamespace:   {0, false},
		rspec.IPCNamespace:     {0, false},
		rspec.UTSNamespace:     {0, false},
		rspec.UserNamespace:    {0, false},
		rspec.CgroupNamespace:  {0, false},
	}

	for index := 0; index < len(v.spec.Linux.Namespaces); index++ {
		ns := v.spec.Linux.Namespaces[index]
		if ns.Path != "" && !osFilepath.IsAbs(v.platform, ns.Path) {
			errs = multierror.Append(errs, specerror.NewError(specerror.NSPathAbs, fmt.Errorf("namespace.path %q is not an absolute path", ns.Path), rspec.Version))
		}

		tmpItem := nsTypeList[ns.Type]
		tmpItem.num = tmpItem.num + 1
		if tmpItem.num > 1 {
			errs = multierror.Append(errs, specerror.NewError(specerror.NSErrorOnDup, fmt.Errorf("duplicated namespace %q", ns.Type), rspec.Version))
		}

		if len(ns.Path) == 0 {
			tmpItem.newExist = true
		}
		nsTypeList[ns.Type] = tmpItem
	}

	if (len(v.spec.Linux.UIDMappings) > 0 || len(v.spec.Linux.GIDMappings) > 0) && !nsTypeList[rspec.UserNamespace].newExist {
		errs = multierror.Append(errs, errors.New("the UID/GID mappings requires a new User namespace to be specified as well"))
	}

	for k := range v.spec.Linux.Sysctl {
		if strings.HasPrefix(k, "net.") && !nsTypeList[rspec.NetworkNamespace].newExist {
			errs = multierror.Append(errs, fmt.Errorf("sysctl %v requires a new Network namespace to be specified as well", k))
		}
		if strings.HasPrefix(k, "fs.mqueue.") {
			if !nsTypeList[rspec.MountNamespace].newExist || !nsTypeList[rspec.IPCNamespace].newExist {
				errs = multierror.Append(errs, fmt.Errorf("sysctl %v requires a new IPC namespace and Mount namespace to be specified as well", k))
			}
		}
	}

	if v.platform == "linux" && !nsTypeList[rspec.UTSNamespace].newExist && v.spec.Hostname != "" {
		errs = multierror.Append(errs, fmt.Errorf("on Linux, hostname requires a new UTS namespace to be specified as well"))
	}

	// Linux devices validation
	devList := make(map[string]bool)
	devTypeList := make(map[string]bool)
	for index := 0; index < len(v.spec.Linux.Devices); index++ {
		device := v.spec.Linux.Devices[index]
		if !deviceValid(device) {
			errs = multierror.Append(errs, fmt.Errorf("device %v is invalid", device))
		}

		if _, exists := devList[device.Path]; exists {
			errs = multierror.Append(errs, fmt.Errorf("device %s is duplicated", device.Path))
		} else {
			var rootfsPath string
			if filepath.IsAbs(v.spec.Root.Path) {
				rootfsPath = v.spec.Root.Path
			} else {
				rootfsPath = filepath.Join(v.bundlePath, v.spec.Root.Path)
			}
			absPath := filepath.Join(rootfsPath, device.Path)
			fi, err := os.Stat(absPath)
			if os.IsNotExist(err) {
				devList[device.Path] = true
			} else if err != nil {
				errs = multierror.Append(errs, err)
			} else {
				fStat, ok := fi.Sys().(*syscall.Stat_t)
				if !ok {
					errs = multierror.Append(errs, specerror.NewError(specerror.DevicesAvailable,
						fmt.Errorf("cannot determine state for device %s", device.Path), rspec.Version))
					continue
				}
				var devType string
				switch fStat.Mode & syscall.S_IFMT {
				case syscall.S_IFCHR:
					devType = "c"
				case syscall.S_IFBLK:
					devType = "b"
				case syscall.S_IFIFO:
					devType = "p"
				default:
					devType = "unmatched"
				}
				if devType != device.Type || (devType == "c" && device.Type == "u") {
					errs = multierror.Append(errs, specerror.NewError(specerror.DevicesFileNotMatch,
						fmt.Errorf("unmatched %s already exists in filesystem", device.Path), rspec.Version))
					continue
				}
				if devType != "p" {
					dev := fStat.Rdev
					major := (dev >> 8) & 0xfff
					minor := (dev & 0xff) | ((dev >> 12) & 0xfff00)
					if int64(major) != device.Major || int64(minor) != device.Minor {
						errs = multierror.Append(errs, specerror.NewError(specerror.DevicesFileNotMatch,
							fmt.Errorf("unmatched %s already exists in filesystem", device.Path), rspec.Version))
						continue
					}
				}
				if device.FileMode != nil {
					expectedPerm := *device.FileMode & os.ModePerm
					actualPerm := fi.Mode() & os.ModePerm
					if expectedPerm != actualPerm {
						errs = multierror.Append(errs, specerror.NewError(specerror.DevicesFileNotMatch,
							fmt.Errorf("unmatched %s already exists in filesystem", device.Path), rspec.Version))
						continue
					}
				}
				if device.UID != nil {
					if *device.UID != fStat.Uid {
						errs = multierror.Append(errs, specerror.NewError(specerror.DevicesFileNotMatch,
							fmt.Errorf("unmatched %s already exists in filesystem", device.Path), rspec.Version))
						continue
					}
				}
				if device.GID != nil {
					if *device.GID != fStat.Gid {
						errs = multierror.Append(errs, specerror.NewError(specerror.DevicesFileNotMatch,
							fmt.Errorf("unmatched %s already exists in filesystem", device.Path), rspec.Version))
						continue
					}
				}
			}
		}

		// unify u->c when comparing, they are synonyms
		var devID string
		if device.Type == "u" {
			devID = fmt.Sprintf("%s:%d:%d", "c", device.Major, device.Minor)
		} else {
			devID = fmt.Sprintf("%s:%d:%d", device.Type, device.Major, device.Minor)
		}

		if _, exists := devTypeList[devID]; exists {
			logrus.Warnf("%v", specerror.NewError(specerror.DevicesErrorOnDup, fmt.Errorf("type:%s, major:%d and minor:%d for linux devices is duplicated", device.Type, device.Major, device.Minor), rspec.Version))
		} else {
			devTypeList[devID] = true
		}
	}

	if v.spec.Linux.Resources != nil {
		errs = multierror.Append(errs, v.CheckLinuxResources())
	}

	for _, maskedPath := range v.spec.Linux.MaskedPaths {
		if !strings.HasPrefix(maskedPath, "/") {
			errs = multierror.Append(errs,
				specerror.NewError(
					specerror.MaskedPathsAbs,
					fmt.Errorf("maskedPath %v is not an absolute path", maskedPath),
					rspec.Version))
		}
	}

	for _, readonlyPath := range v.spec.Linux.ReadonlyPaths {
		if !strings.HasPrefix(readonlyPath, "/") {
			errs = multierror.Append(errs,
				specerror.NewError(
					specerror.ReadonlyPathsAbs,
					fmt.Errorf("readonlyPath %v is not an absolute path", readonlyPath),
					rspec.Version))
		}
	}

	return
}

vendor/github.com/opencontainers/runtime-tools/validate/validate_unsupported.go (generated, vendored, 17 lines)
@@ -1,17 +0,0 @@
// +build !linux

package validate

import (
	"github.com/syndtr/gocapability/capability"
)

// LastCap return last cap of system
func LastCap() capability.Cap {
	return capability.Cap(-1)
}

// CheckLinux is a noop on this platform
func (v *Validator) CheckLinux() (errs error) {
	return nil
}

vendor/github.com/xeipuuv/gojsonpointer/LICENSE-APACHE-2.0.txt (generated, vendored, 202 lines)
@@ -1,202 +0,0 @@

vendor/github.com/xeipuuv/gojsonpointer/README.md (generated, vendored, 41 lines)
@@ -1,41 +0,0 @@
# gojsonpointer
An implementation of JSON Pointer - Go language

## Usage
    jsonText := `{
        "name": "Bobby B",
        "occupation": {
            "title" : "King",
            "years" : 15,
            "heir" : "Joffrey B"
        }
    }`

    var jsonDocument map[string]interface{}
    json.Unmarshal([]byte(jsonText), &jsonDocument)

    //create a JSON pointer
    pointerString := "/occupation/title"
    pointer, _ := NewJsonPointer(pointerString)

    //SET a new value for the "title" in the document
    pointer.Set(jsonDocument, "Supreme Leader of Westeros")

    //GET the new "title" from the document
    title, _, _ := pointer.Get(jsonDocument)
    fmt.Println(title) //outputs "Supreme Leader of Westeros"

    //DELETE the "heir" from the document
    deletePointer := NewJsonPointer("/occupation/heir")
    deletePointer.Delete(jsonDocument)

    b, _ := json.Marshal(jsonDocument)
    fmt.Println(string(b))
    //outputs `{"name":"Bobby B","occupation":{"title":"Supreme Leader of Westeros","years":15}}`


## References
http://tools.ietf.org/html/draft-ietf-appsawg-json-pointer-07

### Note
The 4.Evaluation part of the previous reference, starting with 'If the currently referenced value is a JSON array, the reference token MUST contain either...' is not implemented.

vendor/github.com/xeipuuv/gojsonpointer/pointer.go (generated, vendored, 211 lines)
@@ -1,211 +0,0 @@
// Copyright 2015 xeipuuv ( https://github.com/xeipuuv )
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//   http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

// author           xeipuuv
// author-github    https://github.com/xeipuuv
// author-mail      xeipuuv@gmail.com
//
// repository-name  gojsonpointer
// repository-desc  An implementation of JSON Pointer - Go language
//
// description      Main and unique file.
//
// created          25-02-2013

package gojsonpointer

import (
	"errors"
	"fmt"
	"reflect"
	"strconv"
	"strings"
)

const (
	const_empty_pointer     = ``
	const_pointer_separator = `/`

	const_invalid_start = `JSON pointer must be empty or start with a "` + const_pointer_separator + `"`
)

type implStruct struct {
	mode string // "SET" or "GET"

	inDocument interface{}

	setInValue interface{}

	getOutNode interface{}
	getOutKind reflect.Kind
	outError   error
}

type JsonPointer struct {
	referenceTokens []string
}

// NewJsonPointer parses the given string JSON pointer and returns an object
func NewJsonPointer(jsonPointerString string) (p JsonPointer, err error) {

	// Pointer to the root of the document
	if len(jsonPointerString) == 0 {
		// Keep referenceTokens nil
		return
	}
	if jsonPointerString[0] != '/' {
		return p, errors.New(const_invalid_start)
	}

	p.referenceTokens = strings.Split(jsonPointerString[1:], const_pointer_separator)
	return
}

// Uses the pointer to retrieve a value from a JSON document
func (p *JsonPointer) Get(document interface{}) (interface{}, reflect.Kind, error) {

	is := &implStruct{mode: "GET", inDocument: document}
	p.implementation(is)
	return is.getOutNode, is.getOutKind, is.outError

}

// Uses the pointer to update a value from a JSON document
func (p *JsonPointer) Set(document interface{}, value interface{}) (interface{}, error) {

	is := &implStruct{mode: "SET", inDocument: document, setInValue: value}
	p.implementation(is)
	return document, is.outError

}

// Uses the pointer to delete a value from a JSON document
func (p *JsonPointer) Delete(document interface{}) (interface{}, error) {
	is := &implStruct{mode: "DEL", inDocument: document}
	p.implementation(is)
	return document, is.outError
}

// Both Get and Set functions use the same implementation to avoid code duplication
func (p *JsonPointer) implementation(i *implStruct) {

	kind := reflect.Invalid

	// Full document when empty
	if len(p.referenceTokens) == 0 {
		i.getOutNode = i.inDocument
		i.outError = nil
		i.getOutKind = kind
		i.outError = nil
		return
	}

	node := i.inDocument

	previousNodes := make([]interface{}, len(p.referenceTokens))
	previousTokens := make([]string, len(p.referenceTokens))

	for ti, token := range p.referenceTokens {

		isLastToken := ti == len(p.referenceTokens)-1
		previousNodes[ti] = node
		previousTokens[ti] = token

		switch v := node.(type) {

		case map[string]interface{}:
			decodedToken := decodeReferenceToken(token)
			if _, ok := v[decodedToken]; ok {
				node = v[decodedToken]
				if isLastToken && i.mode == "SET" {
					v[decodedToken] = i.setInValue
				} else if isLastToken && i.mode == "DEL" {
					delete(v, decodedToken)
				}
			} else if isLastToken && i.mode == "SET" {
				v[decodedToken] = i.setInValue
			} else {
				i.outError = fmt.Errorf("Object has no key '%s'", decodedToken)
				i.getOutKind = reflect.Map
				i.getOutNode = nil
				return
			}

		case []interface{}:
			tokenIndex, err := strconv.Atoi(token)
			if err != nil {
				i.outError = fmt.Errorf("Invalid array index '%s'", token)
				i.getOutKind = reflect.Slice
				i.getOutNode = nil
				return
			}
			if tokenIndex < 0 || tokenIndex >= len(v) {
				i.outError = fmt.Errorf("Out of bound array[0,%d] index '%d'", len(v), tokenIndex)
				i.getOutKind = reflect.Slice
				i.getOutNode = nil
				return
			}

			node = v[tokenIndex]
			if isLastToken && i.mode == "SET" {
				v[tokenIndex] = i.setInValue
			} else if isLastToken && i.mode == "DEL" {
				v[tokenIndex] = v[len(v)-1]
				v[len(v)-1] = nil
				v = v[:len(v)-1]
				previousNodes[ti-1].(map[string]interface{})[previousTokens[ti-1]] = v
			}

		default:
			i.outError = fmt.Errorf("Invalid token reference '%s'", token)
			i.getOutKind = reflect.ValueOf(node).Kind()
			i.getOutNode = nil
			return
		}

	}

	i.getOutNode = node
	i.getOutKind = reflect.ValueOf(node).Kind()
	i.outError = nil
}

// Pointer to string representation function
func (p *JsonPointer) String() string {

	if len(p.referenceTokens) == 0 {
		return const_empty_pointer
	}

	pointerString := const_pointer_separator + strings.Join(p.referenceTokens, const_pointer_separator)

	return pointerString
}

// Specific JSON pointer encoding here
// ~0 => ~
// ~1 => /
// ... and vice versa

func decodeReferenceToken(token string) string {
	step1 := strings.Replace(token, `~1`, `/`, -1)
	step2 := strings.Replace(step1, `~0`, `~`, -1)
	return step2
}

func encodeReferenceToken(token string) string {
	step1 := strings.Replace(token, `~`, `~0`, -1)
	step2 := strings.Replace(step1, `/`, `~1`, -1)
	return step2
}

vendor/github.com/xeipuuv/gojsonreference/LICENSE-APACHE-2.0.txt (generated, vendored, 202 lines)
@@ -1,202 +0,0 @@

vendor/github.com/xeipuuv/gojsonreference/README.md (generated, vendored, 10 lines)
@@ -1,10 +0,0 @@
# gojsonreference
An implementation of JSON Reference - Go language

## Dependencies
https://github.com/xeipuuv/gojsonpointer

## References
http://tools.ietf.org/html/draft-ietf-appsawg-json-pointer-07

http://tools.ietf.org/html/draft-pbryan-zyp-json-ref-03

vendor/github.com/xeipuuv/gojsonreference/reference.go (generated, vendored, 147 lines)
@@ -1,147 +0,0 @@
// Copyright 2015 xeipuuv ( https://github.com/xeipuuv )
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//   http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

// author           xeipuuv
// author-github    https://github.com/xeipuuv
// author-mail      xeipuuv@gmail.com
//
// repository-name  gojsonreference
// repository-desc  An implementation of JSON Reference - Go language
//
// description      Main and unique file.
//
// created          26-02-2013

package gojsonreference

import (
	"errors"
	"net/url"
	"path/filepath"
	"runtime"
	"strings"

	"github.com/xeipuuv/gojsonpointer"
)

const (
	const_fragment_char = `#`
)

func NewJsonReference(jsonReferenceString string) (JsonReference, error) {

	var r JsonReference
	err := r.parse(jsonReferenceString)
	return r, err

}

type JsonReference struct {
	referenceUrl     *url.URL
	referencePointer gojsonpointer.JsonPointer

	HasFullUrl      bool
	HasUrlPathOnly  bool
	HasFragmentOnly bool
	HasFileScheme   bool
	HasFullFilePath bool
}

func (r *JsonReference) GetUrl() *url.URL {
	return r.referenceUrl
}

func (r *JsonReference) GetPointer() *gojsonpointer.JsonPointer {
	return &r.referencePointer
}

func (r *JsonReference) String() string {

	if r.referenceUrl != nil {
		return r.referenceUrl.String()
	}

	if r.HasFragmentOnly {
		return const_fragment_char + r.referencePointer.String()
	}

	return r.referencePointer.String()
}

func (r *JsonReference) IsCanonical() bool {
	return (r.HasFileScheme && r.HasFullFilePath) || (!r.HasFileScheme && r.HasFullUrl)
}

// "Constructor", parses the given string JSON reference
func (r *JsonReference) parse(jsonReferenceString string) (err error) {

	r.referenceUrl, err = url.Parse(jsonReferenceString)
	if err != nil {
		return
	}
	refUrl := r.referenceUrl

	if refUrl.Scheme != "" && refUrl.Host != "" {
		r.HasFullUrl = true
	} else {
		if refUrl.Path != "" {
			r.HasUrlPathOnly = true
		} else if refUrl.RawQuery == "" && refUrl.Fragment != "" {
			r.HasFragmentOnly = true
		}
	}

	r.HasFileScheme = refUrl.Scheme == "file"
	if runtime.GOOS == "windows" {
		// on Windows, a file URL may have an extra leading slash, and if it
		// doesn't then its first component will be treated as the host by the
		// Go runtime
		if refUrl.Host == "" && strings.HasPrefix(refUrl.Path, "/") {
			r.HasFullFilePath = filepath.IsAbs(refUrl.Path[1:])
		} else {
			r.HasFullFilePath = filepath.IsAbs(refUrl.Host + refUrl.Path)
		}
	} else {
		r.HasFullFilePath = filepath.IsAbs(refUrl.Path)
	}

	// invalid json-pointer error means url has no json-pointer fragment. simply ignore error
	r.referencePointer, _ = gojsonpointer.NewJsonPointer(refUrl.Fragment)

	return
}

// Creates a new reference from a parent and a child
// If the child cannot inherit from the parent, an error is returned
func (r *JsonReference) Inherits(child JsonReference) (*JsonReference, error) {
	if child.GetUrl() == nil {
		return nil, errors.New("childUrl is nil!")
	}

	if r.GetUrl() == nil {
		return nil, errors.New("parentUrl is nil!")
	}

	// Get a copy of the parent url to make sure we do not modify the original.
	// URL reference resolving fails if the fragment of the child is empty, but the parent's is not.
	// The fragment of the child must be used, so the fragment of the parent is manually removed.
	parentUrl := *r.GetUrl()
	parentUrl.Fragment = ""

	ref, err := NewJsonReference(parentUrl.ResolveReference(child.GetUrl()).String())
	if err != nil {
		return nil, err
	}
	return &ref, err
}

vendor/github.com/xeipuuv/gojsonschema/LICENSE-APACHE-2.0.txt (generated, vendored, 202 lines)
@@ -1,202 +0,0 @@
Copyright 2015 xeipuuv

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
351 vendor/github.com/xeipuuv/gojsonschema/README.md generated vendored
@@ -1,351 +0,0 @@
[](https://godoc.org/github.com/xeipuuv/gojsonschema)
|
|
||||||
[](https://travis-ci.org/xeipuuv/gojsonschema)
|
|
||||||
|
|
||||||
# gojsonschema
|
|
||||||
|
|
||||||
## Description
|
|
||||||
|
|
||||||
An implementation of JSON Schema for the Go programming language. Supports draft-04, draft-06 and draft-07.
|
|
||||||
|
|
||||||
References :
|
|
||||||
|
|
||||||
* http://json-schema.org
|
|
||||||
* http://json-schema.org/latest/json-schema-core.html
|
|
||||||
* http://json-schema.org/latest/json-schema-validation.html
|
|
||||||
|
|
||||||
## Installation
|
|
||||||
|
|
||||||
```
|
|
||||||
go get github.com/xeipuuv/gojsonschema
|
|
||||||
```
|
|
||||||
|
|
||||||
Dependencies :
|
|
||||||
* [github.com/xeipuuv/gojsonpointer](https://github.com/xeipuuv/gojsonpointer)
|
|
||||||
* [github.com/xeipuuv/gojsonreference](https://github.com/xeipuuv/gojsonreference)
|
|
||||||
* [github.com/stretchr/testify/assert](https://github.com/stretchr/testify#assert-package)
|
|
||||||
|
|
||||||
## Usage
|
|
||||||
|
|
||||||
### Example
|
|
||||||
|
|
||||||
```go
|
|
||||||
|
|
||||||
package main
|
|
||||||
|
|
||||||
import (
|
|
||||||
"fmt"
|
|
||||||
"github.com/xeipuuv/gojsonschema"
|
|
||||||
)
|
|
||||||
|
|
||||||
func main() {
|
|
||||||
|
|
||||||
schemaLoader := gojsonschema.NewReferenceLoader("file:///home/me/schema.json")
|
|
||||||
documentLoader := gojsonschema.NewReferenceLoader("file:///home/me/document.json")
|
|
||||||
|
|
||||||
result, err := gojsonschema.Validate(schemaLoader, documentLoader)
|
|
||||||
if err != nil {
|
|
||||||
panic(err.Error())
|
|
||||||
}
|
|
||||||
|
|
||||||
if result.Valid() {
|
|
||||||
fmt.Printf("The document is valid\n")
|
|
||||||
} else {
|
|
||||||
fmt.Printf("The document is not valid. see errors :\n")
|
|
||||||
for _, desc := range result.Errors() {
|
|
||||||
fmt.Printf("- %s\n", desc)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
}
|
|
||||||
|
|
||||||
|
|
||||||
```
|
|
||||||
|
|
||||||
#### Loaders
|
|
||||||
|
|
||||||
There are various ways to load your JSON data.
|
|
||||||
In order to load your schemas and documents,
|
|
||||||
first declare an appropriate loader :
|
|
||||||
|
|
||||||
* Web / HTTP, using a reference :
|
|
||||||
|
|
||||||
```go
|
|
||||||
loader := gojsonschema.NewReferenceLoader("http://www.some_host.com/schema.json")
|
|
||||||
```
|
|
||||||
|
|
||||||
* Local file, using a reference :
|
|
||||||
|
|
||||||
```go
|
|
||||||
loader := gojsonschema.NewReferenceLoader("file:///home/me/schema.json")
|
|
||||||
```
|
|
||||||
|
|
||||||
References use the URI scheme, the prefix (file://) and a full path to the file are required.
|
|
||||||
|
|
||||||
* JSON strings :
|
|
||||||
|
|
||||||
```go
|
|
||||||
loader := gojsonschema.NewStringLoader(`{"type": "string"}`)
|
|
||||||
```
|
|
||||||
|
|
||||||
* Custom Go types :
|
|
||||||
|
|
||||||
```go
|
|
||||||
m := map[string]interface{}{"type": "string"}
|
|
||||||
loader := gojsonschema.NewGoLoader(m)
|
|
||||||
```
|
|
||||||
|
|
||||||
And
|
|
||||||
|
|
||||||
```go
|
|
||||||
type Root struct {
|
|
||||||
Users []User `json:"users"`
|
|
||||||
}
|
|
||||||
|
|
||||||
type User struct {
|
|
||||||
Name string `json:"name"`
|
|
||||||
}
|
|
||||||
|
|
||||||
...
|
|
||||||
|
|
||||||
data := Root{}
|
|
||||||
data.Users = append(data.Users, User{"John"})
|
|
||||||
data.Users = append(data.Users, User{"Sophia"})
|
|
||||||
data.Users = append(data.Users, User{"Bill"})
|
|
||||||
|
|
||||||
loader := gojsonschema.NewGoLoader(data)
|
|
||||||
```
|
|
||||||
|
|
||||||
#### Validation
|
|
||||||
|
|
||||||
Once the loaders are set, validation is easy :
|
|
||||||
|
|
||||||
```go
|
|
||||||
result, err := gojsonschema.Validate(schemaLoader, documentLoader)
|
|
||||||
```
|
|
||||||
|
|
||||||
Alternatively, you might want to load a schema only once and process to multiple validations :
|
|
||||||
|
|
||||||
```go
|
|
||||||
schema, err := gojsonschema.NewSchema(schemaLoader)
|
|
||||||
...
|
|
||||||
result1, err := schema.Validate(documentLoader1)
|
|
||||||
...
|
|
||||||
result2, err := schema.Validate(documentLoader2)
|
|
||||||
...
|
|
||||||
// etc ...
|
|
||||||
```
|
|
||||||
|
|
||||||
To check the result :
|
|
||||||
|
|
||||||
```go
|
|
||||||
if result.Valid() {
|
|
||||||
fmt.Printf("The document is valid\n")
|
|
||||||
} else {
|
|
||||||
fmt.Printf("The document is not valid. see errors :\n")
|
|
||||||
for _, err := range result.Errors() {
|
|
||||||
// Err implements the ResultError interface
|
|
||||||
fmt.Printf("- %s\n", err)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
```
|
|
||||||
|
|
||||||
## Working with Errors
|
|
||||||
|
|
||||||
The library handles string error codes which you can customize by creating your own gojsonschema.locale and setting it
|
|
||||||
```go
|
|
||||||
gojsonschema.Locale = YourCustomLocale{}
|
|
||||||
```
|
|
||||||
|
|
||||||
However, each error contains additional contextual information.
|
|
||||||
|
|
||||||
Newer versions of `gojsonschema` may have new additional errors, so code that uses a custom locale will need to be updated when this happens.
|
|
||||||
|
|
||||||
**err.Type()**: *string* Returns the "type" of error that occurred. Note you can also type check. See below
|
|
||||||
|
|
||||||
Note: An error of RequiredType has an err.Type() return value of "required"
|
|
||||||
|
|
||||||
"required": RequiredError
|
|
||||||
"invalid_type": InvalidTypeError
|
|
||||||
"number_any_of": NumberAnyOfError
|
|
||||||
"number_one_of": NumberOneOfError
|
|
||||||
"number_all_of": NumberAllOfError
|
|
||||||
"number_not": NumberNotError
|
|
||||||
"missing_dependency": MissingDependencyError
|
|
||||||
"internal": InternalError
|
|
||||||
"const": ConstEror
|
|
||||||
"enum": EnumError
|
|
||||||
"array_no_additional_items": ArrayNoAdditionalItemsError
|
|
||||||
"array_min_items": ArrayMinItemsError
|
|
||||||
"array_max_items": ArrayMaxItemsError
|
|
||||||
"unique": ItemsMustBeUniqueError
|
|
||||||
"contains" : ArrayContainsError
|
|
||||||
"array_min_properties": ArrayMinPropertiesError
|
|
||||||
"array_max_properties": ArrayMaxPropertiesError
|
|
||||||
"additional_property_not_allowed": AdditionalPropertyNotAllowedError
|
|
||||||
"invalid_property_pattern": InvalidPropertyPatternError
|
|
||||||
"invalid_property_name": InvalidPropertyNameError
|
|
||||||
"string_gte": StringLengthGTEError
|
|
||||||
"string_lte": StringLengthLTEError
|
|
||||||
"pattern": DoesNotMatchPatternError
|
|
||||||
"multiple_of": MultipleOfError
|
|
||||||
"number_gte": NumberGTEError
|
|
||||||
"number_gt": NumberGTError
|
|
||||||
"number_lte": NumberLTEError
|
|
||||||
"number_lt": NumberLTError
|
|
||||||
|
|
||||||
**err.Value()**: *interface{}* Returns the value given
|
|
||||||
|
|
||||||
**err.Context()**: *gojsonschema.JsonContext* Returns the context. This has a String() method that will print something like this: (root).firstName
|
|
||||||
|
|
||||||
**err.Field()**: *string* Returns the fieldname in the format firstName, or for embedded properties, person.firstName. This returns the same as the String() method on *err.Context()* but removes the (root). prefix.
|
|
||||||
|
|
||||||
**err.Description()**: *string* The error description. This is based on the locale you are using. See the beginning of this section for overwriting the locale with a custom implementation.
|
|
||||||
|
|
||||||
**err.DescriptionFormat()**: *string* The error description format. This is relevant if you are adding custom validation errors afterwards to the result.
|
|
||||||
|
|
||||||
**err.Details()**: *gojsonschema.ErrorDetails* Returns a map[string]interface{} of additional error details specific to the error. For example, GTE errors will have a "min" value, LTE will have a "max" value. See errors.go for a full description of all the error details. Every error always contains a "field" key that holds the value of *err.Field()*
|
|
||||||
|
|
||||||
Note in most cases, the err.Details() will be used to generate replacement strings in your locales, and not used directly. These strings follow the text/template format i.e.
|
|
||||||
```
|
|
||||||
{{.field}} must be greater than or equal to {{.min}}
|
|
||||||
```
|
|
||||||
|
|
||||||
The library allows you to specify custom template functions, should you require more complex error message handling.
|
|
||||||
```go
|
|
||||||
gojsonschema.ErrorTemplateFuncs = map[string]interface{}{
|
|
||||||
"allcaps": func(s string) string {
|
|
||||||
return strings.ToUpper(s)
|
|
||||||
},
|
|
||||||
}
|
|
||||||
```
|
|
||||||
|
|
||||||
Given the above definition, you can use the custom function `"allcaps"` in your localization templates:
|
|
||||||
```
|
|
||||||
{{allcaps .field}} must be greater than or equal to {{.min}}
|
|
||||||
```
|
|
||||||
|
|
||||||
The above error message would then be rendered with the `field` value in capital letters. For example:
|
|
||||||
```
|
|
||||||
"PASSWORD must be greater than or equal to 8"
|
|
||||||
```
|
|
||||||
|
|
||||||
Learn more about what types of template functions you can use in `ErrorTemplateFuncs` by referring to Go's [text/template FuncMap](https://golang.org/pkg/text/template/#FuncMap) type.
|
|
||||||
|
|
||||||
## Formats
|
|
||||||
JSON Schema allows for optional "format" property to validate instances against well-known formats. gojsonschema ships with all of the formats defined in the spec that you can use like this:
|
|
||||||
````json
|
|
||||||
{"type": "string", "format": "email"}
|
|
||||||
````
|
|
||||||
Available formats: date-time, hostname, email, ipv4, ipv6, uri, uri-reference, uuid, regex. Some of the new formats in draft-06 and draft-07 are not yet implemented.
|
|
||||||
|
|
||||||
For repetitive or more complex formats, you can create custom format checkers and add them to gojsonschema like this:
|
|
||||||
|
|
||||||
```go
|
|
||||||
// Define the format checker
|
|
||||||
type RoleFormatChecker struct {}
|
|
||||||
|
|
||||||
// Ensure it meets the gojsonschema.FormatChecker interface
|
|
||||||
func (f RoleFormatChecker) IsFormat(input interface{}) bool {
|
|
||||||
|
|
||||||
asString, ok := input.(string)
|
|
||||||
if ok == false {
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
|
|
||||||
return strings.HasPrefix("ROLE_", asString)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Add it to the library
|
|
||||||
gojsonschema.FormatCheckers.Add("role", RoleFormatChecker{})
|
|
||||||
````
|
|
||||||
|
|
||||||
Now to use in your json schema:
|
|
||||||
````json
|
|
||||||
{"type": "string", "format": "role"}
|
|
||||||
````
|
|
||||||
|
|
||||||
Another example would be to check if the provided integer matches an id on database:
|
|
||||||
|
|
||||||
JSON schema:
|
|
||||||
```json
|
|
||||||
{"type": "integer", "format": "ValidUserId"}
|
|
||||||
```
|
|
||||||
|
|
||||||
```go
|
|
||||||
// Define the format checker
|
|
||||||
type ValidUserIdFormatChecker struct {}
|
|
||||||
|
|
||||||
// Ensure it meets the gojsonschema.FormatChecker interface
|
|
||||||
func (f ValidUserIdFormatChecker) IsFormat(input interface{}) bool {
|
|
||||||
|
|
||||||
asFloat64, ok := input.(float64) // Numbers are always float64 here
|
|
||||||
if ok == false {
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
|
|
||||||
// XXX
|
|
||||||
// do the magic on the database looking for the int(asFloat64)
|
|
||||||
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
|
|
||||||
// Add it to the library
|
|
||||||
gojsonschema.FormatCheckers.Add("ValidUserId", ValidUserIdFormatChecker{})
|
|
||||||
````
|
|
||||||
|
|
||||||
## Additional custom validation
|
|
||||||
After the validation has run and you have the results, you may add additional
|
|
||||||
errors using `Result.AddError`. This is useful to maintain the same format within the resultset instead
|
|
||||||
of having to add special exceptions for your own errors. Below is an example.
|
|
||||||
|
|
||||||
```go
|
|
||||||
type AnswerInvalidError struct {
|
|
||||||
gojsonschema.ResultErrorFields
|
|
||||||
}
|
|
||||||
|
|
||||||
func newAnswerInvalidError(context *gojsonschema.JsonContext, value interface{}, details gojsonschema.ErrorDetails) *AnswerInvalidError {
|
|
||||||
err := AnswerInvalidError{}
|
|
||||||
err.SetContext(context)
|
|
||||||
err.SetType("custom_invalid_error")
|
|
||||||
// it is important to use SetDescriptionFormat() as this is used to call SetDescription() after it has been parsed
|
|
||||||
// using the description of err will be overridden by this.
|
|
||||||
err.SetDescriptionFormat("Answer to the Ultimate Question of Life, the Universe, and Everything is {{.answer}}")
|
|
||||||
err.SetValue(value)
|
|
||||||
err.SetDetails(details)
|
|
||||||
|
|
||||||
return &err
|
|
||||||
}
|
|
||||||
|
|
||||||
func main() {
|
|
||||||
// ...
|
|
||||||
schema, err := gojsonschema.NewSchema(schemaLoader)
|
|
||||||
result, err := gojsonschema.Validate(schemaLoader, documentLoader)
|
|
||||||
|
|
||||||
if true { // some validation
|
|
||||||
jsonContext := gojsonschema.NewJsonContext("question", nil)
|
|
||||||
errDetail := gojsonschema.ErrorDetails{
|
|
||||||
"answer": 42,
|
|
||||||
}
|
|
||||||
result.AddError(
|
|
||||||
newAnswerInvalidError(
|
|
||||||
gojsonschema.NewJsonContext("answer", jsonContext),
|
|
||||||
52,
|
|
||||||
errDetail,
|
|
||||||
),
|
|
||||||
errDetail,
|
|
||||||
)
|
|
||||||
}
|
|
||||||
|
|
||||||
return result, err
|
|
||||||
|
|
||||||
}
|
|
||||||
```
|
|
||||||
|
|
||||||
This is especially useful if you want to add validation beyond what the
|
|
||||||
json schema drafts can provide such business specific logic.
|
|
||||||
|
|
||||||
## Uses
|
|
||||||
|
|
||||||
gojsonschema uses the following test suite :
|
|
||||||
|
|
||||||
https://github.com/json-schema/JSON-Schema-Test-Suite
|
|
324 vendor/github.com/xeipuuv/gojsonschema/errors.go generated vendored
@@ -1,324 +0,0 @@
package gojsonschema
|
|
||||||
|
|
||||||
import (
|
|
||||||
"bytes"
|
|
||||||
"sync"
|
|
||||||
"text/template"
|
|
||||||
)
|
|
||||||
|
|
||||||
var errorTemplates errorTemplate = errorTemplate{template.New("errors-new"), sync.RWMutex{}}
|
|
||||||
|
|
||||||
// template.Template is not thread-safe for writing, so some locking is done
|
|
||||||
// sync.RWMutex is used for efficiently locking when new templates are created
|
|
||||||
type errorTemplate struct {
|
|
||||||
*template.Template
|
|
||||||
sync.RWMutex
|
|
||||||
}
|
|
||||||
|
|
||||||
type (
|
|
||||||
// RequiredError. ErrorDetails: property string
|
|
||||||
RequiredError struct {
|
|
||||||
ResultErrorFields
|
|
||||||
}
|
|
||||||
|
|
||||||
// InvalidTypeError. ErrorDetails: expected, given
|
|
||||||
InvalidTypeError struct {
|
|
||||||
ResultErrorFields
|
|
||||||
}
|
|
||||||
|
|
||||||
// NumberAnyOfError. ErrorDetails: -
|
|
||||||
NumberAnyOfError struct {
|
|
||||||
ResultErrorFields
|
|
||||||
}
|
|
||||||
|
|
||||||
// NumberOneOfError. ErrorDetails: -
|
|
||||||
NumberOneOfError struct {
|
|
||||||
ResultErrorFields
|
|
||||||
}
|
|
||||||
|
|
||||||
// NumberAllOfError. ErrorDetails: -
|
|
||||||
NumberAllOfError struct {
|
|
||||||
ResultErrorFields
|
|
||||||
}
|
|
||||||
|
|
||||||
// NumberNotError. ErrorDetails: -
|
|
||||||
NumberNotError struct {
|
|
||||||
ResultErrorFields
|
|
||||||
}
|
|
||||||
|
|
||||||
// MissingDependencyError. ErrorDetails: dependency
|
|
||||||
MissingDependencyError struct {
|
|
||||||
ResultErrorFields
|
|
||||||
}
|
|
||||||
|
|
||||||
// InternalError. ErrorDetails: error
|
|
||||||
InternalError struct {
|
|
||||||
ResultErrorFields
|
|
||||||
}
|
|
||||||
|
|
||||||
// ConstError. ErrorDetails: allowed
|
|
||||||
ConstError struct {
|
|
||||||
ResultErrorFields
|
|
||||||
}
|
|
||||||
|
|
||||||
// EnumError. ErrorDetails: allowed
|
|
||||||
EnumError struct {
|
|
||||||
ResultErrorFields
|
|
||||||
}
|
|
||||||
|
|
||||||
// ArrayNoAdditionalItemsError. ErrorDetails: -
|
|
||||||
ArrayNoAdditionalItemsError struct {
|
|
||||||
ResultErrorFields
|
|
||||||
}
|
|
||||||
|
|
||||||
// ArrayMinItemsError. ErrorDetails: min
|
|
||||||
ArrayMinItemsError struct {
|
|
||||||
ResultErrorFields
|
|
||||||
}
|
|
||||||
|
|
||||||
// ArrayMaxItemsError. ErrorDetails: max
|
|
||||||
ArrayMaxItemsError struct {
|
|
||||||
ResultErrorFields
|
|
||||||
}
|
|
||||||
|
|
||||||
// ItemsMustBeUniqueError. ErrorDetails: type
|
|
||||||
ItemsMustBeUniqueError struct {
|
|
||||||
ResultErrorFields
|
|
||||||
}
|
|
||||||
|
|
||||||
// ArrayContainsError. ErrorDetails:
|
|
||||||
ArrayContainsError struct {
|
|
||||||
ResultErrorFields
|
|
||||||
}
|
|
||||||
|
|
||||||
// ArrayMinPropertiesError. ErrorDetails: min
|
|
||||||
ArrayMinPropertiesError struct {
|
|
||||||
ResultErrorFields
|
|
||||||
}
|
|
||||||
|
|
||||||
// ArrayMaxPropertiesError. ErrorDetails: max
|
|
||||||
ArrayMaxPropertiesError struct {
|
|
||||||
ResultErrorFields
|
|
||||||
}
|
|
||||||
|
|
||||||
// AdditionalPropertyNotAllowedError. ErrorDetails: property
|
|
||||||
AdditionalPropertyNotAllowedError struct {
|
|
||||||
ResultErrorFields
|
|
||||||
}
|
|
||||||
|
|
||||||
// InvalidPropertyPatternError. ErrorDetails: property, pattern
|
|
||||||
InvalidPropertyPatternError struct {
|
|
||||||
ResultErrorFields
|
|
||||||
}
|
|
||||||
|
|
||||||
// InvalidPopertyNameError. ErrorDetails: property
|
|
||||||
InvalidPropertyNameError struct {
|
|
||||||
ResultErrorFields
|
|
||||||
}
|
|
||||||
|
|
||||||
// StringLengthGTEError. ErrorDetails: min
|
|
||||||
StringLengthGTEError struct {
|
|
||||||
ResultErrorFields
|
|
||||||
}
|
|
||||||
|
|
||||||
// StringLengthLTEError. ErrorDetails: max
|
|
||||||
StringLengthLTEError struct {
|
|
||||||
ResultErrorFields
|
|
||||||
}
|
|
||||||
|
|
||||||
// DoesNotMatchPatternError. ErrorDetails: pattern
|
|
||||||
DoesNotMatchPatternError struct {
|
|
||||||
ResultErrorFields
|
|
||||||
}
|
|
||||||
|
|
||||||
// DoesNotMatchFormatError. ErrorDetails: format
|
|
||||||
DoesNotMatchFormatError struct {
|
|
||||||
ResultErrorFields
|
|
||||||
}
|
|
||||||
|
|
||||||
// MultipleOfError. ErrorDetails: multiple
|
|
||||||
MultipleOfError struct {
|
|
||||||
ResultErrorFields
|
|
||||||
}
|
|
||||||
|
|
||||||
// NumberGTEError. ErrorDetails: min
|
|
||||||
NumberGTEError struct {
|
|
||||||
ResultErrorFields
|
|
||||||
}
|
|
||||||
|
|
||||||
// NumberGTError. ErrorDetails: min
|
|
||||||
NumberGTError struct {
|
|
||||||
ResultErrorFields
|
|
||||||
}
|
|
||||||
|
|
||||||
// NumberLTEError. ErrorDetails: max
|
|
||||||
NumberLTEError struct {
|
|
||||||
ResultErrorFields
|
|
||||||
}
|
|
||||||
|
|
||||||
// NumberLTError. ErrorDetails: max
|
|
||||||
NumberLTError struct {
|
|
||||||
ResultErrorFields
|
|
||||||
}
|
|
||||||
|
|
||||||
// ConditionThenError. ErrorDetails: -
|
|
||||||
ConditionThenError struct {
|
|
||||||
ResultErrorFields
|
|
||||||
}
|
|
||||||
|
|
||||||
// ConditionElseError. ErrorDetails: -
|
|
||||||
ConditionElseError struct {
|
|
||||||
ResultErrorFields
|
|
||||||
}
|
|
||||||
)
|
|
||||||
|
|
||||||
// newError takes a ResultError type and sets the type, context, description, details, value, and field
|
|
||||||
func newError(err ResultError, context *JsonContext, value interface{}, locale locale, details ErrorDetails) {
|
|
||||||
var t string
|
|
||||||
var d string
|
|
||||||
switch err.(type) {
|
|
||||||
case *RequiredError:
|
|
||||||
t = "required"
|
|
||||||
d = locale.Required()
|
|
||||||
case *InvalidTypeError:
|
|
||||||
t = "invalid_type"
|
|
||||||
d = locale.InvalidType()
|
|
||||||
case *NumberAnyOfError:
|
|
||||||
t = "number_any_of"
|
|
||||||
d = locale.NumberAnyOf()
|
|
||||||
case *NumberOneOfError:
|
|
||||||
t = "number_one_of"
|
|
||||||
d = locale.NumberOneOf()
|
|
||||||
case *NumberAllOfError:
|
|
||||||
t = "number_all_of"
|
|
||||||
d = locale.NumberAllOf()
|
|
||||||
case *NumberNotError:
|
|
||||||
t = "number_not"
|
|
||||||
d = locale.NumberNot()
|
|
||||||
case *MissingDependencyError:
|
|
||||||
t = "missing_dependency"
|
|
||||||
d = locale.MissingDependency()
|
|
||||||
case *InternalError:
|
|
||||||
t = "internal"
|
|
||||||
d = locale.Internal()
|
|
||||||
case *ConstError:
|
|
||||||
t = "const"
|
|
||||||
d = locale.Const()
|
|
||||||
case *EnumError:
|
|
||||||
t = "enum"
|
|
||||||
d = locale.Enum()
|
|
||||||
case *ArrayNoAdditionalItemsError:
|
|
||||||
t = "array_no_additional_items"
|
|
||||||
d = locale.ArrayNoAdditionalItems()
|
|
||||||
case *ArrayMinItemsError:
|
|
||||||
t = "array_min_items"
|
|
||||||
d = locale.ArrayMinItems()
|
|
||||||
case *ArrayMaxItemsError:
|
|
||||||
t = "array_max_items"
|
|
||||||
d = locale.ArrayMaxItems()
|
|
||||||
case *ItemsMustBeUniqueError:
|
|
||||||
t = "unique"
|
|
||||||
d = locale.Unique()
|
|
||||||
case *ArrayContainsError:
|
|
||||||
t = "contains"
|
|
||||||
d = locale.ArrayContains()
|
|
||||||
case *ArrayMinPropertiesError:
|
|
||||||
t = "array_min_properties"
|
|
||||||
d = locale.ArrayMinProperties()
|
|
||||||
case *ArrayMaxPropertiesError:
|
|
||||||
t = "array_max_properties"
|
|
||||||
d = locale.ArrayMaxProperties()
|
|
||||||
case *AdditionalPropertyNotAllowedError:
|
|
||||||
t = "additional_property_not_allowed"
|
|
||||||
d = locale.AdditionalPropertyNotAllowed()
|
|
||||||
case *InvalidPropertyPatternError:
|
|
||||||
t = "invalid_property_pattern"
|
|
||||||
d = locale.InvalidPropertyPattern()
|
|
||||||
case *InvalidPropertyNameError:
|
|
||||||
t = "invalid_property_name"
|
|
||||||
d = locale.InvalidPropertyName()
|
|
||||||
case *StringLengthGTEError:
|
|
||||||
t = "string_gte"
|
|
||||||
d = locale.StringGTE()
|
|
||||||
case *StringLengthLTEError:
|
|
||||||
t = "string_lte"
|
|
||||||
d = locale.StringLTE()
|
|
||||||
case *DoesNotMatchPatternError:
|
|
||||||
t = "pattern"
|
|
||||||
d = locale.DoesNotMatchPattern()
|
|
||||||
case *DoesNotMatchFormatError:
|
|
||||||
t = "format"
|
|
||||||
d = locale.DoesNotMatchFormat()
|
|
||||||
case *MultipleOfError:
|
|
||||||
t = "multiple_of"
|
|
||||||
d = locale.MultipleOf()
|
|
||||||
case *NumberGTEError:
|
|
||||||
t = "number_gte"
|
|
||||||
d = locale.NumberGTE()
|
|
||||||
case *NumberGTError:
|
|
||||||
t = "number_gt"
|
|
||||||
d = locale.NumberGT()
|
|
||||||
case *NumberLTEError:
|
|
||||||
t = "number_lte"
|
|
||||||
d = locale.NumberLTE()
|
|
||||||
case *NumberLTError:
|
|
||||||
t = "number_lt"
|
|
||||||
d = locale.NumberLT()
|
|
||||||
case *ConditionThenError:
|
|
||||||
t = "condition_then"
|
|
||||||
d = locale.ConditionThen()
|
|
||||||
case *ConditionElseError:
|
|
||||||
t = "condition_else"
|
|
||||||
d = locale.ConditionElse()
|
|
||||||
}
|
|
||||||
|
|
||||||
err.SetType(t)
|
|
||||||
err.SetContext(context)
|
|
||||||
err.SetValue(value)
|
|
||||||
err.SetDetails(details)
|
|
||||||
err.SetDescriptionFormat(d)
|
|
||||||
details["field"] = err.Field()
|
|
||||||
|
|
||||||
if _, exists := details["context"]; !exists && context != nil {
|
|
||||||
details["context"] = context.String()
|
|
||||||
}
|
|
||||||
|
|
||||||
err.SetDescription(formatErrorDescription(err.DescriptionFormat(), details))
|
|
||||||
}
|
|
||||||
|
|
||||||
// formatErrorDescription takes a string in the default text/template
|
|
||||||
// format and converts it to a string with replacements. The fields come
|
|
||||||
// from the ErrorDetails struct and vary for each type of error.
|
|
||||||
func formatErrorDescription(s string, details ErrorDetails) string {
|
|
||||||
|
|
||||||
var tpl *template.Template
|
|
||||||
var descrAsBuffer bytes.Buffer
|
|
||||||
var err error
|
|
||||||
|
|
||||||
errorTemplates.RLock()
|
|
||||||
tpl = errorTemplates.Lookup(s)
|
|
||||||
errorTemplates.RUnlock()
|
|
||||||
|
|
||||||
if tpl == nil {
|
|
||||||
errorTemplates.Lock()
|
|
||||||
tpl = errorTemplates.New(s)
|
|
||||||
|
|
||||||
if ErrorTemplateFuncs != nil {
|
|
||||||
tpl.Funcs(ErrorTemplateFuncs)
|
|
||||||
}
|
|
||||||
|
|
||||||
tpl, err = tpl.Parse(s)
|
|
||||||
errorTemplates.Unlock()
|
|
||||||
|
|
||||||
if err != nil {
|
|
||||||
return err.Error()
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
err = tpl.Execute(&descrAsBuffer, details)
|
|
||||||
if err != nil {
|
|
||||||
return err.Error()
|
|
||||||
}
|
|
||||||
|
|
||||||
return descrAsBuffer.String()
|
|
||||||
}
|
|
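For readers skimming this removal, the description-rendering step in the errors.go hunk above boils down to executing a text/template format string against the error's details map. A small, self-contained sketch of that rendering (standard library only; the format string and details values are illustrative, not taken from this repository):

```go
package main

import (
	"bytes"
	"fmt"
	"text/template"
)

func main() {
	// Illustrative description format in text/template syntax; "field" and
	// "min" are keys expected in the details map, as in ErrorDetails above.
	const format = "{{.field}} must be greater than or equal to {{.min}}"

	details := map[string]interface{}{
		"field": "password",
		"min":   8,
	}

	// Parse once, keyed by the format string (errors.go additionally caches
	// parsed templates behind a sync.RWMutex; omitted here for brevity).
	tpl, err := template.New(format).Parse(format)
	if err != nil {
		fmt.Println(err)
		return
	}

	var buf bytes.Buffer
	if err := tpl.Execute(&buf, details); err != nil {
		fmt.Println(err)
		return
	}

	fmt.Println(buf.String()) // password must be greater than or equal to 8
}
```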
250 vendor/github.com/xeipuuv/gojsonschema/format_checkers.go generated vendored
@@ -1,250 +0,0 @@
package gojsonschema
|
|
||||||
|
|
||||||
import (
|
|
||||||
"net"
|
|
||||||
"net/url"
|
|
||||||
"regexp"
|
|
||||||
"strings"
|
|
||||||
"time"
|
|
||||||
)
|
|
||||||
|
|
||||||
type (
|
|
||||||
// FormatChecker is the interface all formatters added to FormatCheckerChain must implement
|
|
||||||
FormatChecker interface {
|
|
||||||
IsFormat(input interface{}) bool
|
|
||||||
}
|
|
||||||
|
|
||||||
// FormatCheckerChain holds the formatters
|
|
||||||
FormatCheckerChain struct {
|
|
||||||
formatters map[string]FormatChecker
|
|
||||||
}
|
|
||||||
|
|
||||||
// EmailFormatter verifies email address formats
|
|
||||||
EmailFormatChecker struct{}
|
|
||||||
|
|
||||||
// IPV4FormatChecker verifies IP addresses in the ipv4 format
|
|
||||||
IPV4FormatChecker struct{}
|
|
||||||
|
|
||||||
// IPV6FormatChecker verifies IP addresses in the ipv6 format
|
|
||||||
IPV6FormatChecker struct{}
|
|
||||||
|
|
||||||
// DateTimeFormatChecker verifies date/time formats per RFC3339 5.6
|
|
||||||
//
|
|
||||||
// Valid formats:
|
|
||||||
// Partial Time: HH:MM:SS
|
|
||||||
// Full Date: YYYY-MM-DD
|
|
||||||
// Full Time: HH:MM:SSZ-07:00
|
|
||||||
// Date Time: YYYY-MM-DDTHH:MM:SSZ-0700
|
|
||||||
//
|
|
||||||
// Where
|
|
||||||
// YYYY = 4DIGIT year
|
|
||||||
// MM = 2DIGIT month ; 01-12
|
|
||||||
// DD = 2DIGIT day-month ; 01-28, 01-29, 01-30, 01-31 based on month/year
|
|
||||||
// HH = 2DIGIT hour ; 00-23
|
|
||||||
// MM = 2DIGIT ; 00-59
|
|
||||||
// SS = 2DIGIT ; 00-58, 00-60 based on leap second rules
|
|
||||||
// T = Literal
|
|
||||||
// Z = Literal
|
|
||||||
//
|
|
||||||
// Note: Nanoseconds are also suported in all formats
|
|
||||||
//
|
|
||||||
// http://tools.ietf.org/html/rfc3339#section-5.6
|
|
||||||
DateTimeFormatChecker struct{}
|
|
||||||
|
|
||||||
// URIFormatChecker validates a URI with a valid Scheme per RFC3986
|
|
||||||
URIFormatChecker struct{}
|
|
||||||
|
|
||||||
// URIReferenceFormatChecker validates a URI or relative-reference per RFC3986
|
|
||||||
URIReferenceFormatChecker struct{}
|
|
||||||
|
|
||||||
// HostnameFormatChecker validates a hostname is in the correct format
|
|
||||||
HostnameFormatChecker struct{}
|
|
||||||
|
|
||||||
// UUIDFormatChecker validates a UUID is in the correct format
|
|
||||||
UUIDFormatChecker struct{}
|
|
||||||
|
|
||||||
// RegexFormatChecker validates a regex is in the correct format
|
|
||||||
RegexFormatChecker struct{}
|
|
||||||
)
|
|
||||||
|
|
||||||
var (
|
|
||||||
// Formatters holds the valid formatters, and is a public variable
|
|
||||||
// so library users can add custom formatters
|
|
||||||
FormatCheckers = FormatCheckerChain{
|
|
||||||
formatters: map[string]FormatChecker{
|
|
||||||
"date-time": DateTimeFormatChecker{},
|
|
||||||
"hostname": HostnameFormatChecker{},
|
|
||||||
"email": EmailFormatChecker{},
|
|
||||||
"ipv4": IPV4FormatChecker{},
|
|
||||||
"ipv6": IPV6FormatChecker{},
|
|
||||||
"uri": URIFormatChecker{},
|
|
||||||
"uri-reference": URIReferenceFormatChecker{},
|
|
||||||
"uuid": UUIDFormatChecker{},
|
|
||||||
"regex": RegexFormatChecker{},
|
|
||||||
},
|
|
||||||
}
|
|
||||||
|
|
||||||
// Regex credit: https://github.com/asaskevich/govalidator
|
|
||||||
rxEmail = regexp.MustCompile("^(((([a-zA-Z]|\\d|[!#\\$%&'\\*\\+\\-\\/=\\?\\^_`{\\|}~]|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])+(\\.([a-zA-Z]|\\d|[!#\\$%&'\\*\\+\\-\\/=\\?\\^_`{\\|}~]|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])+)*)|((\\x22)((((\\x20|\\x09)*(\\x0d\\x0a))?(\\x20|\\x09)+)?(([\\x01-\\x08\\x0b\\x0c\\x0e-\\x1f\\x7f]|\\x21|[\\x23-\\x5b]|[\\x5d-\\x7e]|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])|(\\([\\x01-\\x09\\x0b\\x0c\\x0d-\\x7f]|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}]))))*(((\\x20|\\x09)*(\\x0d\\x0a))?(\\x20|\\x09)+)?(\\x22)))@((([a-zA-Z]|\\d|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])|(([a-zA-Z]|\\d|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])([a-zA-Z]|\\d|-|\\.|_|~|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])*([a-zA-Z]|\\d|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])))\\.)+(([a-zA-Z]|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])|(([a-zA-Z]|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])([a-zA-Z]|\\d|-|\\.|_|~|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])*([a-zA-Z]|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])))\\.?$")
|
|
||||||
|
|
||||||
// Regex credit: https://www.socketloop.com/tutorials/golang-validate-hostname
|
|
||||||
rxHostname = regexp.MustCompile(`^([a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9\-]{0,61}[a-zA-Z0-9])(\.([a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9\-]{0,61}[a-zA-Z0-9]))*$`)
|
|
||||||
|
|
||||||
rxUUID = regexp.MustCompile("^[a-f0-9]{8}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{12}$")
|
|
||||||
)
|
|
||||||
|
|
||||||
// Add adds a FormatChecker to the FormatCheckerChain
|
|
||||||
// The name used will be the value used for the format key in your json schema
|
|
||||||
func (c *FormatCheckerChain) Add(name string, f FormatChecker) *FormatCheckerChain {
|
|
||||||
c.formatters[name] = f
|
|
||||||
|
|
||||||
return c
|
|
||||||
}
|
|
||||||
|
|
||||||
// Remove deletes a FormatChecker from the FormatCheckerChain (if it exists)
|
|
||||||
func (c *FormatCheckerChain) Remove(name string) *FormatCheckerChain {
|
|
||||||
delete(c.formatters, name)
|
|
||||||
|
|
||||||
return c
|
|
||||||
}
|
|
||||||
|
|
||||||
// Has checks to see if the FormatCheckerChain holds a FormatChecker with the given name
|
|
||||||
func (c *FormatCheckerChain) Has(name string) bool {
|
|
||||||
_, ok := c.formatters[name]
|
|
||||||
|
|
||||||
return ok
|
|
||||||
}
|
|
||||||
|
|
||||||
// IsFormat will check an input against a FormatChecker with the given name
|
|
||||||
// to see if it is the correct format
|
|
||||||
func (c *FormatCheckerChain) IsFormat(name string, input interface{}) bool {
|
|
||||||
f, ok := c.formatters[name]
|
|
||||||
|
|
||||||
if !ok {
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
|
|
||||||
return f.IsFormat(input)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (f EmailFormatChecker) IsFormat(input interface{}) bool {
|
|
||||||
|
|
||||||
asString, ok := input.(string)
|
|
||||||
if ok == false {
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
|
|
||||||
return rxEmail.MatchString(asString)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Credit: https://github.com/asaskevich/govalidator
|
|
||||||
func (f IPV4FormatChecker) IsFormat(input interface{}) bool {
|
|
||||||
|
|
||||||
asString, ok := input.(string)
|
|
||||||
if ok == false {
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
|
|
||||||
ip := net.ParseIP(asString)
|
|
||||||
return ip != nil && strings.Contains(asString, ".")
|
|
||||||
}
|
|
||||||
|
|
||||||
// Credit: https://github.com/asaskevich/govalidator
|
|
||||||
func (f IPV6FormatChecker) IsFormat(input interface{}) bool {
|
|
||||||
|
|
||||||
asString, ok := input.(string)
|
|
||||||
if ok == false {
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
|
|
||||||
ip := net.ParseIP(asString)
|
|
||||||
return ip != nil && strings.Contains(asString, ":")
|
|
||||||
}
|
|
||||||
|
|
||||||
func (f DateTimeFormatChecker) IsFormat(input interface{}) bool {
|
|
||||||
|
|
||||||
asString, ok := input.(string)
|
|
||||||
if ok == false {
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
|
|
||||||
formats := []string{
|
|
||||||
"15:04:05",
|
|
||||||
"15:04:05Z07:00",
|
|
||||||
"2006-01-02",
|
|
||||||
time.RFC3339,
|
|
||||||
time.RFC3339Nano,
|
|
||||||
}
|
|
||||||
|
|
||||||
for _, format := range formats {
|
|
||||||
if _, err := time.Parse(format, asString); err == nil {
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
|
|
||||||
func (f URIFormatChecker) IsFormat(input interface{}) bool {
|
|
||||||
|
|
||||||
asString, ok := input.(string)
|
|
||||||
if ok == false {
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
|
|
||||||
u, err := url.Parse(asString)
|
|
||||||
if err != nil || u.Scheme == "" {
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
|
|
||||||
func (f URIReferenceFormatChecker) IsFormat(input interface{}) bool {
|
|
||||||
|
|
||||||
asString, ok := input.(string)
|
|
||||||
if ok == false {
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
|
|
||||||
_, err := url.Parse(asString)
|
|
||||||
return err == nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (f HostnameFormatChecker) IsFormat(input interface{}) bool {
|
|
||||||
|
|
||||||
asString, ok := input.(string)
|
|
||||||
if ok == false {
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
|
|
||||||
return rxHostname.MatchString(asString) && len(asString) < 256
|
|
||||||
}
|
|
||||||
|
|
||||||
func (f UUIDFormatChecker) IsFormat(input interface{}) bool {
|
|
||||||
|
|
||||||
asString, ok := input.(string)
|
|
||||||
if ok == false {
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
|
|
||||||
return rxUUID.MatchString(asString)
|
|
||||||
}
|
|
||||||
|
|
||||||
// IsFormat implements FormatChecker interface.
|
|
||||||
func (f RegexFormatChecker) IsFormat(input interface{}) bool {
|
|
||||||
|
|
||||||
asString, ok := input.(string)
|
|
||||||
if ok == false {
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
|
|
||||||
if asString == "" {
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
_, err := regexp.Compile(asString)
|
|
||||||
if err != nil {
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
return true
|
|
||||||
}
|
|
37 vendor/github.com/xeipuuv/gojsonschema/internalLog.go generated vendored
@@ -1,37 +0,0 @@
// Copyright 2015 xeipuuv ( https://github.com/xeipuuv )
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//   http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

// author           xeipuuv
// author-github    https://github.com/xeipuuv
// author-mail      xeipuuv@gmail.com
//
// repository-name  gojsonschema
// repository-desc  An implementation of JSON Schema, based on IETF's draft v4 - Go language.
//
// description      Very simple log wrapper.
//                  Used for debugging/testing purposes.
//
// created          01-01-2015

package gojsonschema

import (
	"log"
)

const internalLogEnabled = false

func internalLog(format string, v ...interface{}) {
	log.Printf(format, v...)
}
72 vendor/github.com/xeipuuv/gojsonschema/jsonContext.go generated vendored
@@ -1,72 +0,0 @@
// Copyright 2013 MongoDB, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//   http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

// author           tolsen
// author-github    https://github.com/tolsen
//
// repository-name  gojsonschema
// repository-desc  An implementation of JSON Schema, based on IETF's draft v4 - Go language.
//
// description      Implements a persistent (immutable w/ shared structure) singly-linked list of strings for the purpose of storing a json context
//
// created          04-09-2013

package gojsonschema

import "bytes"

// JsonContext implements a persistent linked-list of strings
type JsonContext struct {
	head string
	tail *JsonContext
}

func NewJsonContext(head string, tail *JsonContext) *JsonContext {
	return &JsonContext{head, tail}
}

// String displays the context in reverse.
// This plays well with the data structure's persistent nature with
// Cons and a json document's tree structure.
func (c *JsonContext) String(del ...string) string {
	byteArr := make([]byte, 0, c.stringLen())
	buf := bytes.NewBuffer(byteArr)
	c.writeStringToBuffer(buf, del)

	return buf.String()
}

func (c *JsonContext) stringLen() int {
	length := 0
	if c.tail != nil {
		length = c.tail.stringLen() + 1 // add 1 for "."
	}

	length += len(c.head)
	return length
}

func (c *JsonContext) writeStringToBuffer(buf *bytes.Buffer, del []string) {
	if c.tail != nil {
		c.tail.writeStringToBuffer(buf, del)

		if len(del) > 0 {
			buf.WriteString(del[0])
		} else {
			buf.WriteString(".")
		}
	}

	buf.WriteString(c.head)
}
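A minimal sketch of how the persistent list above composes a context path, using only NewJsonContext and String as shown in this file; the segment names are illustrative:

```go
package main

import (
	"fmt"

	"github.com/xeipuuv/gojsonschema"
)

func main() {
	// Each NewJsonContext call prepends a segment; existing nodes are shared, never mutated.
	root := gojsonschema.NewJsonContext("(root)", nil)
	person := gojsonschema.NewJsonContext("person", root)
	firstName := gojsonschema.NewJsonContext("firstName", person)

	fmt.Println(firstName.String())    // (root).person.firstName
	fmt.Println(firstName.String("/")) // (root)/person/firstName
}
```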
364 vendor/github.com/xeipuuv/gojsonschema/jsonLoader.go generated vendored
@@ -1,364 +0,0 @@
// Copyright 2015 xeipuuv ( https://github.com/xeipuuv )
|
|
||||||
//
|
|
||||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
|
||||||
// you may not use this file except in compliance with the License.
|
|
||||||
// You may obtain a copy of the License at
|
|
||||||
//
|
|
||||||
// http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
//
|
|
||||||
// Unless required by applicable law or agreed to in writing, software
|
|
||||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
|
||||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
||||||
// See the License for the specific language governing permissions and
|
|
||||||
// limitations under the License.
|
|
||||||
|
|
||||||
// author xeipuuv
|
|
||||||
// author-github https://github.com/xeipuuv
|
|
||||||
// author-mail xeipuuv@gmail.com
|
|
||||||
//
|
|
||||||
// repository-name gojsonschema
|
|
||||||
// repository-desc An implementation of JSON Schema, based on IETF's draft v4 - Go language.
|
|
||||||
//
|
|
||||||
// description Different strategies to load JSON files.
|
|
||||||
// Includes References (file and HTTP), JSON strings and Go types.
|
|
||||||
//
|
|
||||||
// created 01-02-2015
|
|
||||||
|
|
||||||
package gojsonschema
|
|
||||||
|
|
||||||
import (
|
|
||||||
"bytes"
|
|
||||||
"encoding/json"
|
|
||||||
"errors"
|
|
||||||
"io"
|
|
||||||
"io/ioutil"
|
|
||||||
"net/http"
|
|
||||||
"os"
|
|
||||||
"path/filepath"
|
|
||||||
"runtime"
|
|
||||||
"strings"
|
|
||||||
|
|
||||||
"github.com/xeipuuv/gojsonreference"
|
|
||||||
)
|
|
||||||
|
|
||||||
var osFS = osFileSystem(os.Open)
|
|
||||||
|
|
||||||
// JSON loader interface
|
|
||||||
|
|
||||||
type JSONLoader interface {
|
|
||||||
JsonSource() interface{}
|
|
||||||
LoadJSON() (interface{}, error)
|
|
||||||
JsonReference() (gojsonreference.JsonReference, error)
|
|
||||||
LoaderFactory() JSONLoaderFactory
|
|
||||||
}
|
|
||||||
|
|
||||||
type JSONLoaderFactory interface {
|
|
||||||
New(source string) JSONLoader
|
|
||||||
}
|
|
||||||
|
|
||||||
type DefaultJSONLoaderFactory struct {
|
|
||||||
}
|
|
||||||
|
|
||||||
type FileSystemJSONLoaderFactory struct {
|
|
||||||
fs http.FileSystem
|
|
||||||
}
|
|
||||||
|
|
||||||
func (d DefaultJSONLoaderFactory) New(source string) JSONLoader {
|
|
||||||
return &jsonReferenceLoader{
|
|
||||||
fs: osFS,
|
|
||||||
source: source,
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (f FileSystemJSONLoaderFactory) New(source string) JSONLoader {
|
|
||||||
return &jsonReferenceLoader{
|
|
||||||
fs: f.fs,
|
|
||||||
source: source,
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// osFileSystem is a functional wrapper for os.Open that implements http.FileSystem.
|
|
||||||
type osFileSystem func(string) (*os.File, error)
|
|
||||||
|
|
||||||
func (o osFileSystem) Open(name string) (http.File, error) {
|
|
||||||
return o(name)
|
|
||||||
}
|
|
||||||
|
|
||||||
// JSON Reference loader
|
|
||||||
// references are used to load JSONs from files and HTTP
|
|
||||||
|
|
||||||
type jsonReferenceLoader struct {
|
|
||||||
fs http.FileSystem
|
|
||||||
source string
|
|
||||||
}
|
|
||||||
|
|
||||||
func (l *jsonReferenceLoader) JsonSource() interface{} {
|
|
||||||
return l.source
|
|
||||||
}
|
|
||||||
|
|
||||||
func (l *jsonReferenceLoader) JsonReference() (gojsonreference.JsonReference, error) {
|
|
||||||
return gojsonreference.NewJsonReference(l.JsonSource().(string))
|
|
||||||
}
|
|
||||||
|
|
||||||
func (l *jsonReferenceLoader) LoaderFactory() JSONLoaderFactory {
|
|
||||||
return &FileSystemJSONLoaderFactory{
|
|
||||||
fs: l.fs,
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// NewReferenceLoader returns a JSON reference loader using the given source and the local OS file system.
|
|
||||||
func NewReferenceLoader(source string) *jsonReferenceLoader {
|
|
||||||
return &jsonReferenceLoader{
|
|
||||||
fs: osFS,
|
|
||||||
source: source,
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// NewReferenceLoaderFileSystem returns a JSON reference loader using the given source and file system.
|
|
||||||
func NewReferenceLoaderFileSystem(source string, fs http.FileSystem) *jsonReferenceLoader {
|
|
||||||
return &jsonReferenceLoader{
|
|
||||||
fs: fs,
|
|
||||||
source: source,
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (l *jsonReferenceLoader) LoadJSON() (interface{}, error) {

	var err error

	reference, err := gojsonreference.NewJsonReference(l.JsonSource().(string))
	if err != nil {
		return nil, err
	}

	refToUrl := reference
	refToUrl.GetUrl().Fragment = ""

	var document interface{}

	if reference.HasFileScheme {

		filename := strings.Replace(refToUrl.GetUrl().Path, "file://", "", -1)
		if runtime.GOOS == "windows" {
			// on Windows, a file URL may have an extra leading slash, use slashes
			// instead of backslashes, and have spaces escaped
			if strings.HasPrefix(filename, "/") {
				filename = filename[1:]
			}
			filename = filepath.FromSlash(filename)
		}

		document, err = l.loadFromFile(filename)
		if err != nil {
			return nil, err
		}

	} else {

		document, err = l.loadFromHTTP(refToUrl.String())
		if err != nil {
			return nil, err
		}

	}

	return document, nil

}

func (l *jsonReferenceLoader) loadFromHTTP(address string) (interface{}, error) {

	resp, err := http.Get(address)
	if err != nil {
		return nil, err
	}

	// must return HTTP Status 200 OK
	if resp.StatusCode != http.StatusOK {
		return nil, errors.New(formatErrorDescription(Locale.HttpBadStatus(), ErrorDetails{"status": resp.Status}))
	}

	bodyBuff, err := ioutil.ReadAll(resp.Body)
	if err != nil {
		return nil, err
	}

	return decodeJsonUsingNumber(bytes.NewReader(bodyBuff))

}

func (l *jsonReferenceLoader) loadFromFile(path string) (interface{}, error) {
	f, err := l.fs.Open(path)
	if err != nil {
		return nil, err
	}
	defer f.Close()

	bodyBuff, err := ioutil.ReadAll(f)
	if err != nil {
		return nil, err
	}

	return decodeJsonUsingNumber(bytes.NewReader(bodyBuff))

}

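An illustrative usage sketch (not part of the vendored file; the schema path below is hypothetical) showing a reference loader resolving its source through LoadJSON:

package main

import (
	"fmt"

	"github.com/xeipuuv/gojsonschema"
)

func main() {
	// A file:// reference is read from the local filesystem; any other
	// scheme is fetched over HTTP, as in LoadJSON above.
	loader := gojsonschema.NewReferenceLoader("file:///tmp/example-schema.json")
	doc, err := loader.LoadJSON()
	if err != nil {
		fmt.Println("load failed:", err)
		return
	}
	fmt.Printf("loaded document of type %T\n", doc)
}
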
// JSON string loader

type jsonStringLoader struct {
	source string
}

func (l *jsonStringLoader) JsonSource() interface{} {
	return l.source
}

func (l *jsonStringLoader) JsonReference() (gojsonreference.JsonReference, error) {
	return gojsonreference.NewJsonReference("#")
}

func (l *jsonStringLoader) LoaderFactory() JSONLoaderFactory {
	return &DefaultJSONLoaderFactory{}
}

func NewStringLoader(source string) *jsonStringLoader {
	return &jsonStringLoader{source: source}
}

func (l *jsonStringLoader) LoadJSON() (interface{}, error) {

	return decodeJsonUsingNumber(strings.NewReader(l.JsonSource().(string)))

}

// JSON bytes loader

type jsonBytesLoader struct {
	source []byte
}

func (l *jsonBytesLoader) JsonSource() interface{} {
	return l.source
}

func (l *jsonBytesLoader) JsonReference() (gojsonreference.JsonReference, error) {
	return gojsonreference.NewJsonReference("#")
}

func (l *jsonBytesLoader) LoaderFactory() JSONLoaderFactory {
	return &DefaultJSONLoaderFactory{}
}

func NewBytesLoader(source []byte) *jsonBytesLoader {
	return &jsonBytesLoader{source: source}
}

func (l *jsonBytesLoader) LoadJSON() (interface{}, error) {
	return decodeJsonUsingNumber(bytes.NewReader(l.JsonSource().([]byte)))
}

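A minimal sketch (illustrative only, not vendored code) of the string and bytes loaders; both decode through decodeJsonUsingNumber, so numbers arrive as json.Number:

package main

import (
	"fmt"

	"github.com/xeipuuv/gojsonschema"
)

func main() {
	fromString := gojsonschema.NewStringLoader(`{"name": "demo", "count": 3}`)
	fromBytes := gojsonschema.NewBytesLoader([]byte(`[1, 2, 3]`))

	for _, l := range []gojsonschema.JSONLoader{fromString, fromBytes} {
		doc, err := l.LoadJSON()
		if err != nil {
			fmt.Println("decode failed:", err)
			continue
		}
		// Prints map[string]interface{} and []interface{} respectively.
		fmt.Printf("%T\n", doc)
	}
}
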
// JSON Go (types) loader
// used to load JSONs from the code as maps, interface{}, structs ...

type jsonGoLoader struct {
	source interface{}
}

func (l *jsonGoLoader) JsonSource() interface{} {
	return l.source
}

func (l *jsonGoLoader) JsonReference() (gojsonreference.JsonReference, error) {
	return gojsonreference.NewJsonReference("#")
}

func (l *jsonGoLoader) LoaderFactory() JSONLoaderFactory {
	return &DefaultJSONLoaderFactory{}
}

func NewGoLoader(source interface{}) *jsonGoLoader {
	return &jsonGoLoader{source: source}
}

func (l *jsonGoLoader) LoadJSON() (interface{}, error) {

	// convert it to a compliant JSON first to avoid types "mismatches"

	jsonBytes, err := json.Marshal(l.JsonSource())
	if err != nil {
		return nil, err
	}

	return decodeJsonUsingNumber(bytes.NewReader(jsonBytes))

}

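Because jsonGoLoader round-trips its source through json.Marshal and then decodes with UseNumber, numeric fields come back as json.Number. A small sketch of that behavior (not part of the vendored file):

package main

import (
	"encoding/json"
	"fmt"

	"github.com/xeipuuv/gojsonschema"
)

func main() {
	src := map[string]interface{}{"retries": 3, "timeout": 2.5}

	doc, err := gojsonschema.NewGoLoader(src).LoadJSON()
	if err != nil {
		panic(err)
	}

	m := doc.(map[string]interface{})
	// Both values are json.Number after the marshal/decode round trip.
	fmt.Println(m["retries"].(json.Number), m["timeout"].(json.Number))
}
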
type jsonIOLoader struct {
	buf *bytes.Buffer
}

func NewReaderLoader(source io.Reader) (*jsonIOLoader, io.Reader) {
	buf := &bytes.Buffer{}
	return &jsonIOLoader{buf: buf}, io.TeeReader(source, buf)
}

func NewWriterLoader(source io.Writer) (*jsonIOLoader, io.Writer) {
	buf := &bytes.Buffer{}
	return &jsonIOLoader{buf: buf}, io.MultiWriter(source, buf)
}

func (l *jsonIOLoader) JsonSource() interface{} {
	return l.buf.String()
}

func (l *jsonIOLoader) LoadJSON() (interface{}, error) {
	return decodeJsonUsingNumber(l.buf)
}

func (l *jsonIOLoader) JsonReference() (gojsonreference.JsonReference, error) {
	return gojsonreference.NewJsonReference("#")
}

func (l *jsonIOLoader) LoaderFactory() JSONLoaderFactory {
	return &DefaultJSONLoaderFactory{}
}

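NewReaderLoader and NewWriterLoader only capture bytes that actually pass through the returned reader or writer, so the JSON must be consumed before LoadJSON is called. An illustrative sketch (not vendored code):

package main

import (
	"fmt"
	"io/ioutil"
	"strings"

	"github.com/xeipuuv/gojsonschema"
)

func main() {
	loader, r := gojsonschema.NewReaderLoader(strings.NewReader(`{"ok": true}`))

	// Drain the tee'd reader first; otherwise the loader's internal buffer is empty.
	if _, err := ioutil.ReadAll(r); err != nil {
		panic(err)
	}

	doc, err := loader.LoadJSON()
	if err != nil {
		panic(err)
	}
	fmt.Printf("%v\n", doc) // map[ok:true]
}
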
// JSON raw loader
// In case the JSON is already marshalled to interface{} use this loader
// This is used for testing as otherwise there is no guarantee the JSON is marshalled
// "properly" by using https://golang.org/pkg/encoding/json/#Decoder.UseNumber
type jsonRawLoader struct {
	source interface{}
}

func NewRawLoader(source interface{}) *jsonRawLoader {
	return &jsonRawLoader{source: source}
}
func (l *jsonRawLoader) JsonSource() interface{} {
	return l.source
}
func (l *jsonRawLoader) LoadJSON() (interface{}, error) {
	return l.source, nil
}
func (l *jsonRawLoader) JsonReference() (gojsonreference.JsonReference, error) {
	return gojsonreference.NewJsonReference("#")
}
func (l *jsonRawLoader) LoaderFactory() JSONLoaderFactory {
	return &DefaultJSONLoaderFactory{}
}

func decodeJsonUsingNumber(r io.Reader) (interface{}, error) {

	var document interface{}

	decoder := json.NewDecoder(r)
	decoder.UseNumber()

	err := decoder.Decode(&document)
	if err != nil {
		return nil, err
	}

	return document, nil

}
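Every loader above funnels into decodeJsonUsingNumber; because of UseNumber, large integers are preserved exactly instead of being coerced to float64. A standalone stdlib sketch of the same decoding pattern (illustration only, using encoding/json directly rather than the unexported helper):

package main

import (
	"encoding/json"
	"fmt"
	"strings"
)

func main() {
	var document interface{}

	decoder := json.NewDecoder(strings.NewReader(`{"id": 9007199254740993}`))
	decoder.UseNumber() // keep numbers as json.Number instead of float64

	if err := decoder.Decode(&document); err != nil {
		panic(err)
	}

	id := document.(map[string]interface{})["id"].(json.Number)
	fmt.Println(id.String()) // 9007199254740993, no float64 precision loss
}
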
313
vendor/github.com/xeipuuv/gojsonschema/locales.go
generated
vendored
313
vendor/github.com/xeipuuv/gojsonschema/locales.go
generated
vendored
@ -1,313 +0,0 @@
// Copyright 2015 xeipuuv ( https://github.com/xeipuuv )
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

// author           xeipuuv
// author-github    https://github.com/xeipuuv
// author-mail      xeipuuv@gmail.com
//
// repository-name  gojsonschema
// repository-desc  An implementation of JSON Schema, based on IETF's draft v4 - Go language.
//
// description      Contains const string and messages.
//
// created          01-01-2015

package gojsonschema

type (
	// locale is an interface for defining custom error strings
	locale interface {
		Required() string
		InvalidType() string
		NumberAnyOf() string
		NumberOneOf() string
		NumberAllOf() string
		NumberNot() string
		MissingDependency() string
		Internal() string
		Const() string
		Enum() string
		ArrayNotEnoughItems() string
		ArrayNoAdditionalItems() string
		ArrayMinItems() string
		ArrayMaxItems() string
		Unique() string
		ArrayContains() string
		ArrayMinProperties() string
		ArrayMaxProperties() string
		AdditionalPropertyNotAllowed() string
		InvalidPropertyPattern() string
		InvalidPropertyName() string
		StringGTE() string
		StringLTE() string
		DoesNotMatchPattern() string
		DoesNotMatchFormat() string
		MultipleOf() string
		NumberGTE() string
		NumberGT() string
		NumberLTE() string
		NumberLT() string

		// Schema validations
		RegexPattern() string
		GreaterThanZero() string
		MustBeOfA() string
		MustBeOfAn() string
		CannotBeUsedWithout() string
		CannotBeGT() string
		MustBeOfType() string
		MustBeValidRegex() string
		MustBeValidFormat() string
		MustBeGTEZero() string
		KeyCannotBeGreaterThan() string
		KeyItemsMustBeOfType() string
		KeyItemsMustBeUnique() string
		ReferenceMustBeCanonical() string
		NotAValidType() string
		Duplicated() string
		HttpBadStatus() string
		ParseError() string

		ConditionThen() string
		ConditionElse() string

		// ErrorFormat
		ErrorFormat() string
	}

	// DefaultLocale is the default locale for this package
	DefaultLocale struct{}
)

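This locales.go file is removed by the diff; its locale interface is unexported while DefaultLocale is exported, so a custom locale would typically embed DefaultLocale and override individual messages. A hypothetical sketch follows; it assumes the package-level Locale value (referenced as Locale.HttpBadStatus() in jsonLoader.go above) is an exported, assignable variable of this interface type:

package main

import "github.com/xeipuuv/gojsonschema"

// quietLocale is a hypothetical custom locale: embedding DefaultLocale gives it
// every method of the unexported locale interface, and one template is overridden.
type quietLocale struct {
	gojsonschema.DefaultLocale
}

func (quietLocale) Required() string {
	return `missing required property {{.property}}`
}

func main() {
	// Assumption: swapping the exported Locale variable is the customization point.
	gojsonschema.Locale = quietLocale{}
}
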
func (l DefaultLocale) Required() string {
	return `{{.property}} is required`
}

func (l DefaultLocale) InvalidType() string {
	return `Invalid type. Expected: {{.expected}}, given: {{.given}}`
}

func (l DefaultLocale) NumberAnyOf() string {
	return `Must validate at least one schema (anyOf)`
}

func (l DefaultLocale) NumberOneOf() string {
	return `Must validate one and only one schema (oneOf)`
}

func (l DefaultLocale) NumberAllOf() string {
	return `Must validate all the schemas (allOf)`
}

func (l DefaultLocale) NumberNot() string {
	return `Must not validate the schema (not)`
}

func (l DefaultLocale) MissingDependency() string {
	return `Has a dependency on {{.dependency}}`
}

func (l DefaultLocale) Internal() string {
	return `Internal Error {{.error}}`
}

func (l DefaultLocale) Const() string {
	return `{{.field}} does not match: {{.allowed}}`
}

func (l DefaultLocale) Enum() string {
	return `{{.field}} must be one of the following: {{.allowed}}`
}

func (l DefaultLocale) ArrayNoAdditionalItems() string {
	return `No additional items allowed on array`
}

func (l DefaultLocale) ArrayNotEnoughItems() string {
	return `Not enough items on array to match positional list of schema`
}

func (l DefaultLocale) ArrayMinItems() string {
	return `Array must have at least {{.min}} items`
}

func (l DefaultLocale) ArrayMaxItems() string {
	return `Array must have at most {{.max}} items`
}

func (l DefaultLocale) Unique() string {
	return `{{.type}} items must be unique`
}

func (l DefaultLocale) ArrayContains() string {
	return `At least one of the items must match`
}

func (l DefaultLocale) ArrayMinProperties() string {
	return `Must have at least {{.min}} properties`
}

func (l DefaultLocale) ArrayMaxProperties() string {
	return `Must have at most {{.max}} properties`
}

func (l DefaultLocale) AdditionalPropertyNotAllowed() string {
	return `Additional property {{.property}} is not allowed`
}

func (l DefaultLocale) InvalidPropertyPattern() string {
	return `Property "{{.property}}" does not match pattern {{.pattern}}`
}

func (l DefaultLocale) InvalidPropertyName() string {
	return `Property name of "{{.property}}" does not match`
}

func (l DefaultLocale) StringGTE() string {
	return `String length must be greater than or equal to {{.min}}`
}

func (l DefaultLocale) StringLTE() string {
	return `String length must be less than or equal to {{.max}}`
}

func (l DefaultLocale) DoesNotMatchPattern() string {
	return `Does not match pattern '{{.pattern}}'`
}

func (l DefaultLocale) DoesNotMatchFormat() string {
	return `Does not match format '{{.format}}'`
}

func (l DefaultLocale) MultipleOf() string {
	return `Must be a multiple of {{.multiple}}`
}

func (l DefaultLocale) NumberGTE() string {
	return `Must be greater than or equal to {{.min}}`
}

func (l DefaultLocale) NumberGT() string {
	return `Must be greater than {{.min}}`
}

func (l DefaultLocale) NumberLTE() string {
	return `Must be less than or equal to {{.max}}`
}

func (l DefaultLocale) NumberLT() string {
	return `Must be less than {{.max}}`
}

// Schema validators
func (l DefaultLocale) RegexPattern() string {
	return `Invalid regex pattern '{{.pattern}}'`
}

func (l DefaultLocale) GreaterThanZero() string {
	return `{{.number}} must be strictly greater than 0`
}

func (l DefaultLocale) MustBeOfA() string {
	return `{{.x}} must be of a {{.y}}`
}

func (l DefaultLocale) MustBeOfAn() string {
	return `{{.x}} must be of an {{.y}}`
}

func (l DefaultLocale) CannotBeUsedWithout() string {
	return `{{.x}} cannot be used without {{.y}}`
}

func (l DefaultLocale) CannotBeGT() string {
	return `{{.x}} cannot be greater than {{.y}}`
}

func (l DefaultLocale) MustBeOfType() string {
	return `{{.key}} must be of type {{.type}}`
}

func (l DefaultLocale) MustBeValidRegex() string {
	return `{{.key}} must be a valid regex`
}

func (l DefaultLocale) MustBeValidFormat() string {
	return `{{.key}} must be a valid format {{.given}}`
}

func (l DefaultLocale) MustBeGTEZero() string {
	return `{{.key}} must be greater than or equal to 0`
}

func (l DefaultLocale) KeyCannotBeGreaterThan() string {
	return `{{.key}} cannot be greater than {{.y}}`
}

func (l DefaultLocale) KeyItemsMustBeOfType() string {
	return `{{.key}} items must be {{.type}}`
}

func (l DefaultLocale) KeyItemsMustBeUnique() string {
	return `{{.key}} items must be unique`
}

func (l DefaultLocale) ReferenceMustBeCanonical() string {
	return `Reference {{.reference}} must be canonical`
}

func (l DefaultLocale) NotAValidType() string {
	return `has a primitive type that is NOT VALID -- given: {{.given}} Expected valid values are:{{.expected}}`
}

func (l DefaultLocale) Duplicated() string {
	return `{{.type}} type is duplicated`
}

func (l DefaultLocale) HttpBadStatus() string {
	return `Could not read schema from HTTP, response status is {{.status}}`
}

// Replacement options: field, description, context, value
func (l DefaultLocale) ErrorFormat() string {
	return `{{.field}}: {{.description}}`
}

//Parse error
func (l DefaultLocale) ParseError() string {
	return `Expected: {{.expected}}, given: Invalid JSON`
}

//If/Else
func (l DefaultLocale) ConditionThen() string {
	return `Must validate "then" as "if" was valid`
}

func (l DefaultLocale) ConditionElse() string {
	return `Must validate "else" as "if" was not valid`
}

const (
	STRING_NUMBER                     = "number"
	STRING_ARRAY_OF_STRINGS           = "array of strings"
	STRING_ARRAY_OF_SCHEMAS           = "array of schemas"
	STRING_SCHEMA                     = "valid schema"
	STRING_SCHEMA_OR_ARRAY_OF_STRINGS = "schema or array of strings"
	STRING_PROPERTIES                 = "properties"
	STRING_DEPENDENCY                 = "dependency"
	STRING_PROPERTY                   = "property"
	STRING_UNDEFINED                  = "undefined"
	STRING_CONTEXT_ROOT               = "(root)"
	STRING_ROOT_SCHEMA_PROPERTY       = "(root)"
)
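The locale methods return Go-template-style strings whose {{.key}} placeholders are filled from an ErrorDetails map (see formatErrorDescription in the loader code above). A standalone illustration of that substitution style using text/template; this is not the package's actual formatting code:

package main

import (
	"fmt"
	"os"
	"text/template"
)

func main() {
	// Same placeholder style as DefaultLocale.ArrayMinItems().
	const msg = `Array must have at least {{.min}} items`

	tmpl := template.Must(template.New("msg").Parse(msg))
	if err := tmpl.Execute(os.Stdout, map[string]interface{}{"min": 3}); err != nil {
		panic(err)
	}
	fmt.Println()
}
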
Some files were not shown because too many files have changed in this diff