Merge pull request #19 from Random-Liu/add-sandbox-implementation
Add sandbox implementation
Commit 8fa87a1754

Godeps/Godeps.json (generated, 45 changed lines)
@@ -151,6 +151,21 @@
    "Comment": "v1.1.0",
    "Rev": "346938d642f2ec3594ed81d874461961cd0faa76"
},
+{
+    "ImportPath": "github.com/docker/docker/pkg/random",
+    "Comment": "v1.13.1",
+    "Rev": "092cba3727bb9b4a2f0e922cd6c0f93ea270e363"
+},
+{
+    "ImportPath": "github.com/docker/docker/pkg/stringid",
+    "Comment": "v1.13.1",
+    "Rev": "092cba3727bb9b4a2f0e922cd6c0f93ea270e363"
+},
+{
+    "ImportPath": "github.com/docker/docker/pkg/truncindex",
+    "Comment": "v1.13.1",
+    "Rev": "092cba3727bb9b4a2f0e922cd6c0f93ea270e363"
+},
{
    "ImportPath": "github.com/gogo/protobuf/gogoproto",
    "Comment": "v0.3-150-gd2e1ade2",
@@ -221,6 +236,18 @@
    "Comment": "v1.0.0-rc5",
    "Rev": "035da1dca3dfbb00d752eb58b0b158d6129f3776"
},
+{
+    "ImportPath": "github.com/opencontainers/runtime-tools/generate",
+    "Rev": "8addcc695096a0fc61010af8766952546bba7cd0"
+},
+{
+    "ImportPath": "github.com/opencontainers/runtime-tools/generate/seccomp",
+    "Rev": "8addcc695096a0fc61010af8766952546bba7cd0"
+},
+{
+    "ImportPath": "github.com/opencontainers/runtime-tools/validate",
+    "Rev": "8addcc695096a0fc61010af8766952546bba7cd0"
+},
{
    "ImportPath": "github.com/pkg/errors",
    "Comment": "v0.8.0",
@@ -243,6 +270,24 @@
    "Comment": "v1.1.4-6-g18a02ba",
    "Rev": "18a02ba4a312f95da08ff4cfc0055750ce50ae9e"
},
+{
+    "ImportPath": "github.com/stretchr/testify/require",
+    "Comment": "v1.1.4-6-g18a02ba",
+    "Rev": "18a02ba4a312f95da08ff4cfc0055750ce50ae9e"
+},
+{
+    "ImportPath": "github.com/syndtr/gocapability/capability",
+    "Rev": "e7cb7fa329f456b3855136a2642b197bad7366ba"
+},
+{
+    "ImportPath": "github.com/tchap/go-patricia/patricia",
+    "Comment": "v2.2.6",
+    "Rev": "666120de432aea38ab06bd5c818f04f4129882c9"
+},
+{
+    "ImportPath": "github.com/tonistiigi/fifo",
+    "Rev": "fe870ccf293940774c2b44e23f6c71fff8f7547d"
+},
{
    "ImportPath": "golang.org/x/net/context",
    "Rev": "e90d6d0afc4c315a0d87a568ae68577cc15149a0"
@@ -32,20 +32,20 @@ func main() {
    o.AddFlags(pflag.CommandLine)
    options.InitFlags()

-   if o.CRIContainerdVersion {
+   if o.PrintVersion {
        version.PrintVersion()
        os.Exit(0)
    }

-   glog.V(2).Infof("Connect to containerd socket %q with timeout %v", o.ContainerdSocketPath, o.ContainerdConnectionTimeout)
-   conn, err := server.ConnectToContainerd(o.ContainerdSocketPath, o.ContainerdConnectionTimeout)
+   glog.V(2).Infof("Connect to containerd endpoint %q with timeout %v", o.ContainerdEndpoint, o.ContainerdConnectionTimeout)
+   conn, err := server.ConnectToContainerd(o.ContainerdEndpoint, o.ContainerdConnectionTimeout)
    if err != nil {
-       glog.Exitf("Failed to connect containerd socket %q: %v", o.ContainerdSocketPath, err)
+       glog.Exitf("Failed to connect containerd endpoint %q: %v", o.ContainerdEndpoint, err)
    }

-   glog.V(2).Infof("Run cri-containerd grpc server on socket %q", o.CRIContainerdSocketPath)
-   service := server.NewCRIContainerdService(conn)
-   s := server.NewCRIContainerdServer(o.CRIContainerdSocketPath, service, service)
+   glog.V(2).Infof("Run cri-containerd grpc server on socket %q", o.SocketPath)
+   service := server.NewCRIContainerdService(conn, o.RootDir)
+   s := server.NewCRIContainerdServer(o.SocketPath, service, service)
    if err := s.Run(); err != nil {
        glog.Exitf("Failed to run cri-containerd grpc server: %v", err)
    }
@@ -25,12 +25,15 @@ import (

// CRIContainerdOptions contains cri-containerd command line options.
type CRIContainerdOptions struct {
-   // CRIContainerdSocketPath is the path to the socket which cri-containerd serves on.
-   CRIContainerdSocketPath string
-   // CRIContainerdVersion is the git release version of cri-containerd
-   CRIContainerdVersion bool
-   // ContainerdSocketPath is the path to the containerd socket.
-   ContainerdSocketPath string
+   // SocketPath is the path to the socket which cri-containerd serves on.
+   SocketPath string
+   // RootDir is the root directory path for managing cri-containerd files
+   // (metadata checkpoint etc.)
+   RootDir string
+   // PrintVersion indicates to print version information of cri-containerd.
+   PrintVersion bool
+   // ContainerdEndpoint is the containerd endpoint path.
+   ContainerdEndpoint string
    // ContainerdConnectionTimeout is the connection timeout for containerd client.
    ContainerdConnectionTimeout time.Duration
}
@@ -42,13 +45,15 @@ func NewCRIContainerdOptions() *CRIContainerdOptions {

// AddFlags adds cri-containerd command line options to pflag.
func (c *CRIContainerdOptions) AddFlags(fs *pflag.FlagSet) {
-   fs.StringVar(&c.CRIContainerdSocketPath, "cri-containerd-socket",
+   fs.StringVar(&c.SocketPath, "socket-path",
        "/var/run/cri-containerd.sock", "Path to the socket which cri-containerd serves on.")
-   fs.StringVar(&c.ContainerdSocketPath, "containerd-socket",
-       "/run/containerd/containerd.sock", "Path to the containerd socket.")
+   fs.StringVar(&c.RootDir, "root-dir",
+       "/var/lib/cri-containerd", "Root directory path for cri-containerd managed files (metadata checkpoint etc).")
+   fs.StringVar(&c.ContainerdEndpoint, "containerd-endpoint",
+       "/run/containerd/containerd.sock", "Path to the containerd endpoint.")
    fs.DurationVar(&c.ContainerdConnectionTimeout, "containerd-connection-timeout",
        2*time.Minute, "Connection timeout for containerd client.")
-   fs.BoolVar(&c.CRIContainerdVersion, "version",
+   fs.BoolVar(&c.PrintVersion, "version",
        false, "Print cri-containerd version information and quit.")
}

pkg/os/os.go (11 changed lines)
@@ -17,7 +17,12 @@ limitations under the License.
package os

import (
+   "io"
    "os"
+
+   "golang.org/x/net/context"
+
+   "github.com/tonistiigi/fifo"
)

// OS collects system level operations that need to be mocked out
@@ -25,6 +30,7 @@ import (
type OS interface {
    MkdirAll(path string, perm os.FileMode) error
    RemoveAll(path string) error
+   OpenFifo(ctx context.Context, fn string, flag int, perm os.FileMode) (io.ReadWriteCloser, error)
}

// RealOS is used to dispatch the real system level operations.
@@ -39,3 +45,8 @@ func (RealOS) MkdirAll(path string, perm os.FileMode) error {
func (RealOS) RemoveAll(path string) error {
    return os.RemoveAll(path)
}
+
+// OpenFifo will call fifo.OpenFifo to open a fifo.
+func (RealOS) OpenFifo(ctx context.Context, fn string, flag int, perm os.FileMode) (io.ReadWriteCloser, error) {
+   return fifo.OpenFifo(ctx, fn, flag, perm)
+}
@@ -17,8 +17,11 @@ limitations under the License.
package testing

import (
+   "io"
    "os"

+   "golang.org/x/net/context"
+
    osInterface "github.com/kubernetes-incubator/cri-containerd/pkg/os"
)

@@ -28,6 +31,7 @@ import (
type FakeOS struct {
    MkdirAllFn  func(string, os.FileMode) error
    RemoveAllFn func(string) error
+   OpenFifoFn  func(context.Context, string, int, os.FileMode) (io.ReadWriteCloser, error)
}

var _ osInterface.OS = &FakeOS{}
@@ -52,3 +56,11 @@ func (f *FakeOS) RemoveAll(path string) error {
    }
    return nil
}
+
+// OpenFifo is a fake call that invokes OpenFifoFn or just returns nil.
+func (f *FakeOS) OpenFifo(ctx context.Context, fn string, flag int, perm os.FileMode) (io.ReadWriteCloser, error) {
+   if f.OpenFifoFn != nil {
+       return f.OpenFifoFn(ctx, fn, flag, perm)
+   }
+   return nil, nil
+}
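
The FakeOS extension above mirrors the pattern the tests in this PR use for RemoveAllFn. As a rough sketch (not part of the change; it assumes the newTestCRIContainerdService helper and the pkg/os/testing import path used elsewhere in this PR), a test could stub the new OpenFifo call like this:

package server

import (
    "fmt"
    "io"
    "os"
    "testing"

    "golang.org/x/net/context"

    ostesting "github.com/kubernetes-incubator/cri-containerd/pkg/os/testing"
)

// Hypothetical example, not part of this change.
func TestOpenFifoStubExample(t *testing.T) {
    c := newTestCRIContainerdService()
    fakeOS := c.os.(*ostesting.FakeOS)
    fakeOS.OpenFifoFn = func(ctx context.Context, fn string, flag int, perm os.FileMode) (io.ReadWriteCloser, error) {
        // Any caller of c.os.OpenFifo (e.g. RunPodSandbox preparing the
        // stdout/stderr pipes) now receives this injected error.
        return nil, fmt.Errorf("refusing to open fifo %q", fn)
    }
}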
pkg/registrar/registrar.go (new file, 101 lines)
@@ -0,0 +1,101 @@
/*
Copyright 2017 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package registrar

import (
    "fmt"
    "sync"
)

// Registrar stores one-to-one name<->key mappings.
// Names and keys must be unique.
// Registrar is safe for concurrent access.
type Registrar struct {
    lock      sync.Mutex
    nameToKey map[string]string
    keyToName map[string]string
}

// NewRegistrar creates a new Registrar with the empty indexes.
func NewRegistrar() *Registrar {
    return &Registrar{
        nameToKey: make(map[string]string),
        keyToName: make(map[string]string),
    }
}

// Reserve registers a name<->key mapping, name or key must not
// be empty.
// Reserve is idempotent.
// Attempting to reserve a conflict key<->name mapping results
// in an error.
// A name<->key reservation is globally unique.
func (r *Registrar) Reserve(name, key string) error {
    r.lock.Lock()
    defer r.lock.Unlock()

    if name == "" || key == "" {
        return fmt.Errorf("invalid name %q or key %q", name, key)
    }

    if k, exists := r.nameToKey[name]; exists {
        if k != key {
            return fmt.Errorf("name %q is reserved for %q", name, k)
        }
        return nil
    }

    if n, exists := r.keyToName[key]; exists {
        if n != name {
            return fmt.Errorf("key %q is reserved for %q", key, n)
        }
        return nil
    }

    r.nameToKey[name] = key
    r.keyToName[key] = name
    return nil
}

// ReleaseByName releases the reserved name<->key mapping by name.
// Once released, the name and the key can be reserved again.
func (r *Registrar) ReleaseByName(name string) {
    r.lock.Lock()
    defer r.lock.Unlock()

    key, exists := r.nameToKey[name]
    if !exists {
        return
    }

    delete(r.nameToKey, name)
    delete(r.keyToName, key)
}

// ReleaseByKey release the reserved name<->key mapping by key.
func (r *Registrar) ReleaseByKey(key string) {
    r.lock.Lock()
    defer r.lock.Unlock()

    name, exists := r.keyToName[key]
    if !exists {
        return
    }

    delete(r.nameToKey, name)
    delete(r.keyToName, key)
}
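
For reference, a minimal standalone sketch of how the Registrar added above behaves (illustrative only; it assumes the package is importable as github.com/kubernetes-incubator/cri-containerd/pkg/registrar, matching the pkg/... import paths used elsewhere in this PR):

package main

import (
    "fmt"

    "github.com/kubernetes-incubator/cri-containerd/pkg/registrar"
)

func main() {
    r := registrar.NewRegistrar()
    fmt.Println(r.Reserve("sandbox-name", "sandbox-id")) // <nil>: new reservation
    fmt.Println(r.Reserve("sandbox-name", "sandbox-id")) // <nil>: idempotent for the same pair
    fmt.Println(r.Reserve("sandbox-name", "other-id"))   // error: name already reserved
    r.ReleaseByKey("sandbox-id")                         // frees both the name and the key
    fmt.Println(r.Reserve("sandbox-name", "other-id"))   // <nil>: reservable again after release
}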
pkg/registrar/registrar_test.go (new file, 54 lines)
@@ -0,0 +1,54 @@
/*
Copyright 2017 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package registrar

import (
    "testing"

    assertlib "github.com/stretchr/testify/assert"
)

func TestRegistrar(t *testing.T) {
    r := NewRegistrar()
    assert := assertlib.New(t)

    t.Logf("should be able to reserve a name<->key mapping")
    assert.NoError(r.Reserve("test-name-1", "test-id-1"))

    t.Logf("should be able to reserve a new name<->key mapping")
    assert.NoError(r.Reserve("test-name-2", "test-id-2"))

    t.Logf("should be able to reserve the same name<->key mapping")
    assert.NoError(r.Reserve("test-name-1", "test-id-1"))

    t.Logf("should not be able to reserve conflict name<->key mapping")
    assert.Error(r.Reserve("test-name-1", "test-id-conflict"))
    assert.Error(r.Reserve("test-name-conflict", "test-id-2"))

    t.Logf("should be able to release name<->key mapping by key")
    r.ReleaseByKey("test-id-1")

    t.Logf("should be able to release name<->key mapping by name")
    r.ReleaseByName("test-name-2")

    t.Logf("should be able to reserve new name<->key mapping after release")
    assert.NoError(r.Reserve("test-name-1", "test-id-new"))
    assert.NoError(r.Reserve("test-name-new", "test-id-2"))

    t.Logf("should be able to reserve same name/key name<->key")
    assert.NoError(r.Reserve("same-name-id", "same-name-id"))
}
pkg/server/helpers.go (new file, 125 lines)
@@ -0,0 +1,125 @@
/*
Copyright 2017 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package server

import (
    "fmt"
    "path/filepath"
    "strings"

    "github.com/docker/docker/pkg/stringid"
    "github.com/docker/docker/pkg/truncindex"
    "google.golang.org/grpc"

    "github.com/containerd/containerd"

    "github.com/kubernetes-incubator/cri-containerd/pkg/metadata"

    "k8s.io/kubernetes/pkg/kubelet/api/v1alpha1/runtime"
)

const (
    // relativeRootfsPath is the rootfs path relative to bundle path.
    relativeRootfsPath = "rootfs"
    // defaultRuntime is the runtime to use in containerd. We may support
    // other runtime in the future.
    defaultRuntime = "linux"
    // sandboxesDir contains all sandbox root. A sandbox root is the running
    // directory of the sandbox, all files created for the sandbox will be
    // placed under this directory.
    sandboxesDir = "sandboxes"
    // stdinNamedPipe is the name of stdin named pipe.
    stdinNamedPipe = "stdin"
    // stdoutNamedPipe is the name of stdout named pipe.
    stdoutNamedPipe = "stdout"
    // stderrNamedPipe is the name of stderr named pipe.
    stderrNamedPipe = "stderr"
    // Delimiter used to construct container/sandbox names.
    nameDelimiter = "_"
    // netNSFormat is the format of network namespace of a process.
    netNSFormat = "/proc/%v/ns/net"
)

// generateID generates a random unique id.
func generateID() string {
    return stringid.GenerateNonCryptoID()
}

// makeSandboxName generates sandbox name from sandbox metadata. The name
// generated is unique as long as sandbox metadata is unique.
func makeSandboxName(s *runtime.PodSandboxMetadata) string {
    return strings.Join([]string{
        s.Name,      // 0
        s.Namespace, // 1
        s.Uid,       // 2
        fmt.Sprintf("%d", s.Attempt), // 3
    }, nameDelimiter)
}

// getCgroupsPath generates container cgroups path.
func getCgroupsPath(cgroupsParent string, id string) string {
    // TODO(random-liu): [P0] Handle systemd.
    return filepath.Join(cgroupsParent, id)
}

// getSandboxRootDir returns the root directory for managing sandbox files,
// e.g. named pipes.
func getSandboxRootDir(rootDir, id string) string {
    return filepath.Join(rootDir, sandboxesDir, id)
}

// getStreamingPipes returns the stdin/stdout/stderr pipes path in the root.
func getStreamingPipes(rootDir string) (string, string, string) {
    stdin := filepath.Join(rootDir, stdinNamedPipe)
    stdout := filepath.Join(rootDir, stdoutNamedPipe)
    stderr := filepath.Join(rootDir, stderrNamedPipe)
    return stdin, stdout, stderr
}

// getNetworkNamespace returns the network namespace of a process.
func getNetworkNamespace(pid uint32) string {
    return fmt.Sprintf(netNSFormat, pid)
}

// isContainerdContainerNotExistError checks whether a grpc error is containerd
// ErrContainerNotExist error.
// TODO(random-liu): Containerd should expose error better through api.
func isContainerdContainerNotExistError(grpcError error) bool {
    return grpc.ErrorDesc(grpcError) == containerd.ErrContainerNotExist.Error()
}

// getSandbox gets the sandbox metadata from the sandbox store. It returns nil without
// error if the sandbox metadata is not found. It also tries to get full sandbox id and
// retry if the sandbox metadata is not found with the initial id.
func (c *criContainerdService) getSandbox(id string) (*metadata.SandboxMetadata, error) {
    sandbox, err := c.sandboxStore.Get(id)
    if err != nil {
        return nil, fmt.Errorf("sandbox metadata not found: %v", err)
    }
    if sandbox != nil {
        return sandbox, nil
    }
    // sandbox is not found in metadata store, try to extract full id.
    id, err = c.sandboxIDIndex.Get(id)
    if err != nil {
        if err == truncindex.ErrNotExist {
            return nil, nil
        }
        return nil, fmt.Errorf("sandbox id not found: %v", err)
    }
    return c.sandboxStore.Get(id)
}
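
To make the naming and layout conventions above concrete, here is a hypothetical example test (not part of this change) that exercises the helpers; it relies only on the constants and functions defined in this file:

package server

import (
    "testing"

    "github.com/stretchr/testify/assert"

    "k8s.io/kubernetes/pkg/kubelet/api/v1alpha1/runtime"
)

// Hypothetical example, not part of this change.
func TestHelperLayoutExample(t *testing.T) {
    meta := &runtime.PodSandboxMetadata{Name: "nginx", Namespace: "default", Uid: "12345", Attempt: 0}
    // Sandbox names join name, namespace, uid and attempt with "_".
    assert.Equal(t, "nginx_default_12345_0", makeSandboxName(meta))

    // Each sandbox gets a root directory under <root-dir>/sandboxes/<id>.
    dir := getSandboxRootDir("/var/lib/cri-containerd", "abcdefg")
    assert.Equal(t, "/var/lib/cri-containerd/sandboxes/abcdefg", dir)

    // The streaming pipes live directly inside that directory.
    stdin, stdout, stderr := getStreamingPipes(dir)
    assert.Equal(t, dir+"/stdin", stdin)
    assert.Equal(t, dir+"/stdout", stdout)
    assert.Equal(t, dir+"/stderr", stderr)
}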
pkg/server/helpers_test.go (new file, 59 lines)
@@ -0,0 +1,59 @@
/*
Copyright 2017 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package server

import (
    "testing"

    "github.com/stretchr/testify/assert"

    "github.com/kubernetes-incubator/cri-containerd/pkg/metadata"
)

func TestGetSandbox(t *testing.T) {
    c := newTestCRIContainerdService()
    testID := "abcdefg"
    testSandbox := metadata.SandboxMetadata{
        ID:   testID,
        Name: "test-name",
    }
    assert.NoError(t, c.sandboxStore.Create(testSandbox))
    assert.NoError(t, c.sandboxIDIndex.Add(testID))

    for desc, test := range map[string]struct {
        id       string
        expected *metadata.SandboxMetadata
    }{
        "full id": {
            id:       testID,
            expected: &testSandbox,
        },
        "partial id": {
            id:       testID[:3],
            expected: &testSandbox,
        },
        "non-exist id": {
            id:       "gfedcba",
            expected: nil,
        },
    } {
        t.Logf("TestCase %q", desc)
        sb, err := c.getSandbox(test.id)
        assert.NoError(t, err)
        assert.Equal(t, test.expected, sb)
    }
}
@@ -94,7 +94,7 @@ func (c *criContainerdService) imageReferenceResolver(ctx context.Context, ref s
        return resolvedImageName, manifest, compressedSize, fmt.Errorf("failed to resolve ref %q: err: %v", ref, err)
    }

-   err = c.imageStore.Put(ctx, resolvedImageName, desc)
+   err = c.imageStoreService.Put(ctx, resolvedImageName, desc)
    if err != nil {
        return resolvedImageName, manifest, compressedSize, fmt.Errorf("failed to put %q: desc: %v err: %v", resolvedImageName, desc, err)
    }
@@ -107,7 +107,7 @@ func (c *criContainerdService) imageReferenceResolver(ctx context.Context, ref s
        return resolvedImageName, manifest, compressedSize, fmt.Errorf("failed to fetch %q: desc: %v err: %v", resolvedImageName, desc, err)
    }

-   image, err := c.imageStore.Get(ctx, resolvedImageName)
+   image, err := c.imageStoreService.Get(ctx, resolvedImageName)
    if err != nil {
        return resolvedImageName, manifest, compressedSize,
            fmt.Errorf("get failed for image:%q err: %v", resolvedImageName, err)
@@ -150,7 +150,7 @@ func (c *criContainerdService) pullImage(ctx context.Context, ref string) (image
        return desc, size, fmt.Errorf("failed to resolve ref %q: err: %v", ref, err)
    }

-   err = c.imageStore.Put(ctx, resolvedImageName, desc)
+   err = c.imageStoreService.Put(ctx, resolvedImageName, desc)
    if err != nil {
        return desc, size, fmt.Errorf("failed to put %q: desc: %v err: %v", resolvedImageName, desc, err)
    }
@@ -165,7 +165,7 @@ func (c *criContainerdService) pullImage(ctx context.Context, ref string) (image
        return desc, size, fmt.Errorf("failed to fetch %q: desc: %v err: %v", resolvedImageName, desc, err)
    }

-   image, err := c.imageStore.Get(ctx, resolvedImageName)
+   image, err := c.imageStoreService.Get(ctx, resolvedImageName)
    if err != nil {
        return desc, size,
            fmt.Errorf("get failed for image:%q err: %v", resolvedImageName, err)
@@ -17,14 +17,117 @@ limitations under the License.
package server

import (
-   "errors"
+   "fmt"
+
+   "github.com/golang/glog"
    "golang.org/x/net/context"
+
+   "github.com/containerd/containerd/api/services/execution"
+
+   "github.com/containerd/containerd/api/types/container"
    "k8s.io/kubernetes/pkg/kubelet/api/v1alpha1/runtime"
+
+   "github.com/kubernetes-incubator/cri-containerd/pkg/metadata"
)

// ListPodSandbox returns a list of Sandbox.
-func (c *criContainerdService) ListPodSandbox(ctx context.Context, r *runtime.ListPodSandboxRequest) (*runtime.ListPodSandboxResponse, error) {
-   return nil, errors.New("not implemented")
+func (c *criContainerdService) ListPodSandbox(ctx context.Context, r *runtime.ListPodSandboxRequest) (retRes *runtime.ListPodSandboxResponse, retErr error) {
+   glog.V(4).Infof("ListPodSandbox with filter %+v", r.GetFilter())
+   defer func() {
+       if retErr == nil {
+           glog.V(4).Infof("ListPodSandbox returns sandboxes %+v", retRes.GetItems())
+       }
+   }()
+
+   // List all sandbox metadata from store.
+   sandboxesInStore, err := c.sandboxStore.List()
+   if err != nil {
+       return nil, fmt.Errorf("failed to list metadata from sandbox store: %v", err)
+   }
+
+   resp, err := c.containerService.List(ctx, &execution.ListRequest{})
+   if err != nil {
+       return nil, fmt.Errorf("failed to list sandbox containers: %v", err)
+   }
+   sandboxesInContainerd := resp.Containers
+
+   var sandboxes []*runtime.PodSandbox
+   for _, sandboxInStore := range sandboxesInStore {
+       var sandboxInContainerd *container.Container
+       for _, s := range sandboxesInContainerd {
+           if s.ID == sandboxInStore.ID {
+               sandboxInContainerd = s
+               break
+           }
+       }
+
+       // Set sandbox state to NOTREADY by default.
+       state := runtime.PodSandboxState_SANDBOX_NOTREADY
+       // If the sandbox container is running, return the sandbox as READY.
+       if sandboxInContainerd != nil && sandboxInContainerd.Status == container.Status_RUNNING {
+           state = runtime.PodSandboxState_SANDBOX_READY
+       }
+
+       sandboxes = append(sandboxes, toCRISandbox(sandboxInStore, state))
+   }
+
+   sandboxes = c.filterCRISandboxes(sandboxes, r.GetFilter())
+   return &runtime.ListPodSandboxResponse{Items: sandboxes}, nil
+}
+
+// toCRISandbox converts sandbox metadata into CRI pod sandbox.
+func toCRISandbox(meta *metadata.SandboxMetadata, state runtime.PodSandboxState) *runtime.PodSandbox {
+   return &runtime.PodSandbox{
+       Id:          meta.ID,
+       Metadata:    meta.Config.GetMetadata(),
+       State:       state,
+       CreatedAt:   meta.CreatedAt,
+       Labels:      meta.Config.GetLabels(),
+       Annotations: meta.Config.GetAnnotations(),
+   }
+}
+
+// filterCRISandboxes filters CRISandboxes.
+func (c *criContainerdService) filterCRISandboxes(sandboxes []*runtime.PodSandbox, filter *runtime.PodSandboxFilter) []*runtime.PodSandbox {
+   if filter == nil {
+       return sandboxes
+   }
+
+   var filterID string
+   if filter.GetId() != "" {
+       // Handle truncate id. Use original filter if failed to convert.
+       var err error
+       filterID, err = c.sandboxIDIndex.Get(filter.GetId())
+       if err != nil {
+           filterID = filter.GetId()
+       }
+   }
+
+   filtered := []*runtime.PodSandbox{}
+   for _, s := range sandboxes {
+       // Filter by id
+       if filterID != "" && filterID != s.Id {
+           continue
+       }
+       // Filter by state
+       if filter.GetState() != nil && filter.GetState().GetState() != s.State {
+           continue
+       }
+       // Filter by label
+       if filter.GetLabelSelector() != nil {
+           match := true
+           for k, v := range filter.GetLabelSelector() {
+               if s.Labels[k] != v {
+                   match = false
+                   break
+               }
+           }
+           if !match {
+               continue
+           }
+       }
+       filtered = append(filtered, s)
+   }
+
+   return filtered
}
pkg/server/sandbox_list_test.go (new file, 210 lines)
@@ -0,0 +1,210 @@
/*
Copyright 2017 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package server

import (
    "testing"
    "time"

    "github.com/stretchr/testify/assert"
    "golang.org/x/net/context"

    "github.com/containerd/containerd/api/types/container"

    "k8s.io/kubernetes/pkg/kubelet/api/v1alpha1/runtime"

    "github.com/kubernetes-incubator/cri-containerd/pkg/metadata"
    servertesting "github.com/kubernetes-incubator/cri-containerd/pkg/server/testing"
)

func TestToCRISandbox(t *testing.T) {
    config := &runtime.PodSandboxConfig{
        Metadata: &runtime.PodSandboxMetadata{
            Name:      "test-name",
            Uid:       "test-uid",
            Namespace: "test-ns",
            Attempt:   1,
        },
        Labels:      map[string]string{"a": "b"},
        Annotations: map[string]string{"c": "d"},
    }
    createdAt := time.Now().UnixNano()
    meta := &metadata.SandboxMetadata{
        ID:        "test-id",
        Name:      "test-name",
        Config:    config,
        CreatedAt: createdAt,
        NetNS:     "test-netns",
    }
    state := runtime.PodSandboxState_SANDBOX_READY
    expect := &runtime.PodSandbox{
        Id:          "test-id",
        Metadata:    config.GetMetadata(),
        State:       state,
        CreatedAt:   createdAt,
        Labels:      config.GetLabels(),
        Annotations: config.GetAnnotations(),
    }
    s := toCRISandbox(meta, state)
    assert.Equal(t, expect, s)
}

func TestFilterSandboxes(t *testing.T) {
    c := newTestCRIContainerdService()

    testSandboxes := []*runtime.PodSandbox{
        {
            Id:       "1",
            Metadata: &runtime.PodSandboxMetadata{Name: "name-1", Uid: "uid-1", Namespace: "ns-1", Attempt: 1},
            State:    runtime.PodSandboxState_SANDBOX_READY,
        },
        {
            Id:       "2",
            Metadata: &runtime.PodSandboxMetadata{Name: "name-2", Uid: "uid-2", Namespace: "ns-2", Attempt: 2},
            State:    runtime.PodSandboxState_SANDBOX_NOTREADY,
            Labels:   map[string]string{"a": "b"},
        },
        {
            Id:       "3",
            Metadata: &runtime.PodSandboxMetadata{Name: "name-2", Uid: "uid-2", Namespace: "ns-2", Attempt: 2},
            State:    runtime.PodSandboxState_SANDBOX_READY,
            Labels:   map[string]string{"c": "d"},
        },
    }
    for desc, test := range map[string]struct {
        filter *runtime.PodSandboxFilter
        expect []*runtime.PodSandbox
    }{
        "no filter": {
            expect: testSandboxes,
        },
        "id filter": {
            filter: &runtime.PodSandboxFilter{Id: "2"},
            expect: []*runtime.PodSandbox{testSandboxes[1]},
        },
        "state filter": {
            filter: &runtime.PodSandboxFilter{
                State: &runtime.PodSandboxStateValue{
                    State: runtime.PodSandboxState_SANDBOX_READY,
                },
            },
            expect: []*runtime.PodSandbox{testSandboxes[0], testSandboxes[2]},
        },
        "label filter": {
            filter: &runtime.PodSandboxFilter{
                LabelSelector: map[string]string{"a": "b"},
            },
            expect: []*runtime.PodSandbox{testSandboxes[1]},
        },
        "mixed filter not matched": {
            filter: &runtime.PodSandboxFilter{
                Id:            "1",
                LabelSelector: map[string]string{"a": "b"},
            },
            expect: []*runtime.PodSandbox{},
        },
        "mixed filter matched": {
            filter: &runtime.PodSandboxFilter{
                State: &runtime.PodSandboxStateValue{
                    State: runtime.PodSandboxState_SANDBOX_READY,
                },
                LabelSelector: map[string]string{"c": "d"},
            },
            expect: []*runtime.PodSandbox{testSandboxes[2]},
        },
    } {
        filtered := c.filterCRISandboxes(testSandboxes, test.filter)
        assert.Equal(t, test.expect, filtered, desc)
    }
}

func TestListPodSandbox(t *testing.T) {
    c := newTestCRIContainerdService()

    fake := c.containerService.(*servertesting.FakeExecutionClient)

    sandboxesInStore := []metadata.SandboxMetadata{
        {
            ID:     "1",
            Name:   "name-1",
            Config: &runtime.PodSandboxConfig{Metadata: &runtime.PodSandboxMetadata{Name: "name-1"}},
        },
        {
            ID:     "2",
            Name:   "name-2",
            Config: &runtime.PodSandboxConfig{Metadata: &runtime.PodSandboxMetadata{Name: "name-2"}},
        },
        {
            ID:     "3",
            Name:   "name-3",
            Config: &runtime.PodSandboxConfig{Metadata: &runtime.PodSandboxMetadata{Name: "name-3"}},
        },
    }
    sandboxesInContainerd := []container.Container{
        // Running container with corresponding metadata
        {
            ID:     "1",
            Pid:    1,
            Status: container.Status_RUNNING,
        },
        // Stopped container with corresponding metadata
        {
            ID:     "2",
            Pid:    2,
            Status: container.Status_STOPPED,
        },
        // Container without corresponding metadata
        {
            ID:     "4",
            Pid:    4,
            Status: container.Status_STOPPED,
        },
    }
    expect := []*runtime.PodSandbox{
        {
            Id:       "1",
            Metadata: &runtime.PodSandboxMetadata{Name: "name-1"},
            State:    runtime.PodSandboxState_SANDBOX_READY,
        },
        {
            Id:       "2",
            Metadata: &runtime.PodSandboxMetadata{Name: "name-2"},
            State:    runtime.PodSandboxState_SANDBOX_NOTREADY,
        },
        {
            Id:       "3",
            Metadata: &runtime.PodSandboxMetadata{Name: "name-3"},
            State:    runtime.PodSandboxState_SANDBOX_NOTREADY,
        },
    }

    // Inject test metadata
    for _, s := range sandboxesInStore {
        c.sandboxStore.Create(s)
    }

    // Inject fake containerd containers
    fake.SetFakeContainers(sandboxesInContainerd)

    resp, err := c.ListPodSandbox(context.Background(), &runtime.ListPodSandboxRequest{})
    assert.NoError(t, err)
    sandboxes := resp.GetItems()
    assert.Len(t, sandboxes, len(expect))
    for _, s := range expect {
        assert.Contains(t, sandboxes, s)
    }
}
@@ -17,15 +17,75 @@ limitations under the License.
package server

import (
-   "errors"
+   "fmt"
+
+   "github.com/golang/glog"
    "golang.org/x/net/context"
+
+   "github.com/containerd/containerd/api/services/execution"
+
    "k8s.io/kubernetes/pkg/kubelet/api/v1alpha1/runtime"
)

// RemovePodSandbox removes the sandbox. If there are running containers in the
// sandbox, they should be forcibly removed.
-func (c *criContainerdService) RemovePodSandbox(ctx context.Context, r *runtime.RemovePodSandboxRequest) (*runtime.RemovePodSandboxResponse, error) {
-   return nil, errors.New("not implemented")
+func (c *criContainerdService) RemovePodSandbox(ctx context.Context, r *runtime.RemovePodSandboxRequest) (retRes *runtime.RemovePodSandboxResponse, retErr error) {
+   glog.V(2).Infof("RemovePodSandbox for sandbox %q", r.GetPodSandboxId())
+   defer func() {
+       if retErr == nil {
+           glog.V(2).Info("RemovePodSandbox returns successfully")
+       }
+   }()
+
+   sandbox, err := c.getSandbox(r.GetPodSandboxId())
+   if err != nil {
+       return nil, fmt.Errorf("failed to find sandbox %q: %v", r.GetPodSandboxId(), err)
+   }
+   if sandbox == nil {
+       // Do not return error if the id doesn't exist.
+       glog.V(5).Infof("RemovePodSandbox called for sandbox %q that does not exist",
+           r.GetPodSandboxId())
+       return &runtime.RemovePodSandboxResponse{}, nil
+   }
+   // Use the full sandbox id.
+   id := sandbox.ID
+
+   // TODO(random-liu): [P2] Remove all containers in the sandbox.
+
+   // Return error if sandbox container is not fully stopped.
+   _, err = c.containerService.Info(ctx, &execution.InfoRequest{ID: id})
+   if err != nil && !isContainerdContainerNotExistError(err) {
+       return nil, fmt.Errorf("failed to get sandbox container info for %q: %v", id, err)
+   }
+   if err == nil {
+       return nil, fmt.Errorf("sandbox container %q is not fully stopped", id)
+   }
+
+   // TODO(random-liu): [P0] Cleanup shm created in RunPodSandbox.
+   // TODO(random-liu): [P1] Remove permanent namespace once used.
+
+   // Cleanup the sandbox root directory.
+   sandboxRootDir := getSandboxRootDir(c.rootDir, id)
+   if err := c.os.RemoveAll(sandboxRootDir); err != nil {
+       return nil, fmt.Errorf("failed to remove sandbox root directory %q: %v",
+           sandboxRootDir, err)
+   }
+
+   // Remove sandbox metadata from metadata store. Note that once the sandbox
+   // metadata is successfully deleted:
+   // 1) ListPodSandbox will not include this sandbox.
+   // 2) PodSandboxStatus and StopPodSandbox will return error.
+   // 3) On-going operations which have held the metadata reference will not be
+   //    affected.
+   if err := c.sandboxStore.Delete(id); err != nil {
+       return nil, fmt.Errorf("failed to delete sandbox metadata for %q: %v", id, err)
+   }
+
+   // Release the sandbox id from id index.
+   c.sandboxIDIndex.Delete(id) // nolint: errcheck
+
+   // Release the sandbox name reserved for the sandbox.
+   c.sandboxNameIndex.ReleaseByKey(id)
+
+   return &runtime.RemovePodSandboxResponse{}, nil
}
pkg/server/sandbox_remove_test.go (new file, 122 lines)
@@ -0,0 +1,122 @@
/*
Copyright 2017 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package server

import (
    "fmt"
    "testing"

    "github.com/stretchr/testify/assert"
    "golang.org/x/net/context"

    "github.com/containerd/containerd/api/types/container"

    "github.com/kubernetes-incubator/cri-containerd/pkg/metadata"
    ostesting "github.com/kubernetes-incubator/cri-containerd/pkg/os/testing"
    servertesting "github.com/kubernetes-incubator/cri-containerd/pkg/server/testing"

    "k8s.io/kubernetes/pkg/kubelet/api/v1alpha1/runtime"
)

func TestRemovePodSandbox(t *testing.T) {
    testID := "test-id"
    testName := "test-name"
    testMetadata := metadata.SandboxMetadata{
        ID:   testID,
        Name: testName,
    }
    for desc, test := range map[string]struct {
        sandboxContainers   []container.Container
        injectMetadata      bool
        injectContainerdErr error
        injectFSErr         error
        expectErr           bool
        expectRemoved       string
        expectCalls         []string
    }{
        "should not return error if sandbox does not exist": {
            injectMetadata: false,
            expectErr:      false,
            expectCalls:    []string{},
        },
        "should return error when sandbox container is not deleted": {
            injectMetadata:    true,
            sandboxContainers: []container.Container{{ID: testID}},
            expectErr:         true,
            expectCalls:       []string{"info"},
        },
        "should return error when arbitrary containerd error is injected": {
            injectMetadata:      true,
            injectContainerdErr: fmt.Errorf("arbitrary error"),
            expectErr:           true,
            expectCalls:         []string{"info"},
        },
        "should return error when error fs error is injected": {
            injectMetadata: true,
            injectFSErr:    fmt.Errorf("fs error"),
            expectRemoved:  getSandboxRootDir(testRootDir, testID),
            expectErr:      true,
            expectCalls:    []string{"info"},
        },
        "should be able to successfully delete": {
            injectMetadata: true,
            expectRemoved:  getSandboxRootDir(testRootDir, testID),
            expectCalls:    []string{"info"},
        },
    } {
        t.Logf("TestCase %q", desc)
        c := newTestCRIContainerdService()
        fake := c.containerService.(*servertesting.FakeExecutionClient)
        fakeOS := c.os.(*ostesting.FakeOS)
        fake.SetFakeContainers(test.sandboxContainers)
        if test.injectMetadata {
            c.sandboxNameIndex.Reserve(testName, testID)
            c.sandboxIDIndex.Add(testID)
            c.sandboxStore.Create(testMetadata)
        }
        if test.injectContainerdErr != nil {
            fake.InjectError("info", test.injectContainerdErr)
        }
        fakeOS.RemoveAllFn = func(path string) error {
            assert.Equal(t, test.expectRemoved, path)
            return test.injectFSErr
        }
        res, err := c.RemovePodSandbox(context.Background(), &runtime.RemovePodSandboxRequest{
            PodSandboxId: testID,
        })
        assert.Equal(t, test.expectCalls, fake.GetCalledNames())
        if test.expectErr {
            assert.Error(t, err)
            assert.Nil(t, res)
            continue
        }
        assert.NoError(t, err)
        assert.NotNil(t, res)
        assert.NoError(t, c.sandboxNameIndex.Reserve(testName, testID),
            "sandbox name should be released")
        _, err = c.sandboxIDIndex.Get(testID)
        assert.Error(t, err, "sandbox id should be removed")
        meta, err := c.sandboxStore.Get(testID)
        assert.NoError(t, err)
        assert.Nil(t, meta, "sandbox metadata should be removed")
        res, err = c.RemovePodSandbox(context.Background(), &runtime.RemovePodSandboxRequest{
            PodSandboxId: testID,
        })
        assert.NoError(t, err)
        assert.NotNil(t, res, "remove should be idempotent")
    }
}
@ -17,15 +17,251 @@ limitations under the License.
|
|||||||
package server
|
package server
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"errors"
|
"encoding/json"
|
||||||
|
"fmt"
|
||||||
|
"io"
|
||||||
|
"io/ioutil"
|
||||||
|
"syscall"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
prototypes "github.com/gogo/protobuf/types"
|
||||||
|
"github.com/golang/glog"
|
||||||
|
runtimespec "github.com/opencontainers/runtime-spec/specs-go"
|
||||||
|
"github.com/opencontainers/runtime-tools/generate"
|
||||||
"golang.org/x/net/context"
|
"golang.org/x/net/context"
|
||||||
|
|
||||||
|
"github.com/containerd/containerd/api/services/execution"
|
||||||
|
"github.com/containerd/containerd/api/types/mount"
|
||||||
|
|
||||||
"k8s.io/kubernetes/pkg/kubelet/api/v1alpha1/runtime"
|
"k8s.io/kubernetes/pkg/kubelet/api/v1alpha1/runtime"
|
||||||
|
|
||||||
|
"github.com/kubernetes-incubator/cri-containerd/pkg/metadata"
|
||||||
)
|
)
|
||||||
|
|
||||||
// RunPodSandbox creates and starts a pod-level sandbox. Runtimes should ensure
|
// RunPodSandbox creates and starts a pod-level sandbox. Runtimes should ensure
|
||||||
// the sandbox is in ready state.
|
// the sandbox is in ready state.
|
||||||
func (c *criContainerdService) RunPodSandbox(ctx context.Context, r *runtime.RunPodSandboxRequest) (*runtime.RunPodSandboxResponse, error) {
|
func (c *criContainerdService) RunPodSandbox(ctx context.Context, r *runtime.RunPodSandboxRequest) (retRes *runtime.RunPodSandboxResponse, retErr error) {
|
||||||
return nil, errors.New("not implemented")
|
glog.V(2).Infof("RunPodSandbox with config %+v", r.GetConfig())
|
||||||
|
defer func() {
|
||||||
|
if retErr == nil {
|
||||||
|
glog.V(2).Infof("RunPodSandbox returns sandbox id %q", retRes.GetPodSandboxId())
|
||||||
|
}
|
||||||
|
}()
|
||||||
|
|
||||||
|
config := r.GetConfig()
|
||||||
|
|
||||||
|
// Generate unique id and name for the sandbox and reserve the name.
|
||||||
|
id := generateID()
|
||||||
|
name := makeSandboxName(config.GetMetadata())
|
||||||
|
// Reserve the sandbox name to avoid concurrent `RunPodSandbox` request starting the
|
||||||
|
// same sandbox.
|
||||||
|
if err := c.sandboxNameIndex.Reserve(name, id); err != nil {
|
||||||
|
return nil, fmt.Errorf("failed to reserve sandbox name %q: %v", name, err)
|
||||||
|
}
|
||||||
|
defer func() {
|
||||||
|
// Release the name if the function returns with an error.
|
||||||
|
if retErr != nil {
|
||||||
|
c.sandboxNameIndex.ReleaseByName(name)
|
||||||
|
}
|
||||||
|
}()
|
||||||
|
// Register the sandbox id.
|
||||||
|
if err := c.sandboxIDIndex.Add(id); err != nil {
|
||||||
|
return nil, fmt.Errorf("failed to insert sandbox id %q: %v", id, err)
|
||||||
|
}
|
||||||
|
defer func() {
|
||||||
|
// Delete the sandbox id if the function returns with an error.
|
||||||
|
if retErr != nil {
|
||||||
|
c.sandboxIDIndex.Delete(id) // nolint: errcheck
|
||||||
|
}
|
||||||
|
}()
|
||||||
|
|
||||||
|
// Create initial sandbox metadata.
|
||||||
|
meta := metadata.SandboxMetadata{
|
||||||
|
ID: id,
|
||||||
|
Name: name,
|
||||||
|
Config: config,
|
||||||
|
}
|
||||||
|
|
||||||
|
// TODO(random-liu): [P0] Ensure pause image snapshot, apply default image config
|
||||||
|
// and get snapshot mounts.
|
||||||
|
// Use fixed rootfs path and sleep command.
|
||||||
|
const rootPath = "/"
|
||||||
|
|
||||||
|
// TODO(random-liu): [P0] Set up sandbox network with network plugin.
|
||||||
|
|
||||||
|
// Create sandbox container root directory.
|
||||||
|
// Prepare streaming named pipe.
|
||||||
|
sandboxRootDir := getSandboxRootDir(c.rootDir, id)
|
||||||
|
if err := c.os.MkdirAll(sandboxRootDir, 0755); err != nil {
|
||||||
|
return nil, fmt.Errorf("failed to create sandbox root directory %q: %v",
|
||||||
|
sandboxRootDir, err)
|
||||||
|
}
|
||||||
|
defer func() {
|
||||||
|
if retErr != nil {
|
||||||
|
// Cleanup the sandbox root directory.
|
||||||
|
if err := c.os.RemoveAll(sandboxRootDir); err != nil {
|
||||||
|
glog.Errorf("Failed to remove sandbox root directory %q: %v",
|
||||||
|
sandboxRootDir, err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}()
|
||||||
|
|
||||||
|
// TODO(random-liu): [P1] Moving following logging related logic into util functions.
|
||||||
|
// Discard sandbox container output because we don't care about it.
|
||||||
|
_, stdout, stderr := getStreamingPipes(sandboxRootDir)
|
||||||
|
	for _, p := range []string{stdout, stderr} {
		f, err := c.os.OpenFifo(ctx, p, syscall.O_RDONLY|syscall.O_CREAT|syscall.O_NONBLOCK, 0700)
		if err != nil {
			return nil, fmt.Errorf("failed to open named pipe %q: %v", p, err)
		}
		defer func(c io.Closer) {
			if retErr != nil {
				c.Close()
			}
		}(f)
		go func(r io.ReadCloser) {
			// Discard the output for now.
			io.Copy(ioutil.Discard, r) // nolint: errcheck
			r.Close()
		}(f)
	}

	// Start sandbox container.
	spec := c.generateSandboxContainerSpec(id, config)
	rawSpec, err := json.Marshal(spec)
	if err != nil {
		return nil, fmt.Errorf("failed to marshal oci spec %+v: %v", spec, err)
	}
	glog.V(4).Infof("Sandbox container spec: %+v", spec)
	createOpts := &execution.CreateRequest{
		ID: id,
		Spec: &prototypes.Any{
			TypeUrl: runtimespec.Version,
			Value:   rawSpec,
		},
		// TODO(random-liu): [P0] Get rootfs mount from containerd.
		Rootfs: []*mount.Mount{
			{
				Type:   "bind",
				Source: rootPath,
				Options: []string{
					"rw",
					"rbind",
				},
			},
		},
		Runtime: defaultRuntime,
		// No stdin for sandbox container.
		Stdout: stdout,
		Stderr: stderr,
	}

	// Create sandbox container in containerd.
	glog.V(5).Infof("Create sandbox container (id=%q, name=%q) with options %+v.",
		id, name, createOpts)
	createResp, err := c.containerService.Create(ctx, createOpts)
	if err != nil {
		return nil, fmt.Errorf("failed to create sandbox container %q: %v",
			id, err)
	}
	defer func() {
		if retErr != nil {
			// Cleanup the sandbox container if an error is returned.
			if _, err := c.containerService.Delete(ctx, &execution.DeleteRequest{ID: id}); err != nil {
				glog.Errorf("Failed to delete sandbox container %q: %v",
					id, err)
			}
		}
	}()

	// Start sandbox container in containerd.
	if _, err := c.containerService.Start(ctx, &execution.StartRequest{ID: id}); err != nil {
		return nil, fmt.Errorf("failed to start sandbox container %q: %v",
			id, err)
	}

	// Add sandbox into sandbox store.
	meta.CreatedAt = time.Now().UnixNano()
	// TODO(random-liu): [P2] Replace with permanent network namespace.
	meta.NetNS = getNetworkNamespace(createResp.Pid)
	if err := c.sandboxStore.Create(meta); err != nil {
		return nil, fmt.Errorf("failed to add sandbox metadata %+v into store: %v",
			meta, err)
	}

	return &runtime.RunPodSandboxResponse{PodSandboxId: id}, nil
}

func (c *criContainerdService) generateSandboxContainerSpec(id string, config *runtime.PodSandboxConfig) *runtimespec.Spec {
	// TODO(random-liu): [P0] Get command from image config.
	pauseCommand := []string{"sh", "-c", "while true; do sleep 1000000000; done"}

	// Creates a spec Generator with the default spec.
	// TODO(random-liu): [P1] Compare the default settings with docker and containerd default.
	g := generate.New()

	// Set relative root path.
	g.SetRootPath(relativeRootfsPath)

	// Set process commands.
	g.SetProcessArgs(pauseCommand)

	// Make root of sandbox container read-only.
	g.SetRootReadonly(true)

	// Set hostname.
	g.SetHostname(config.GetHostname())

	// TODO(random-liu): [P0] Set DNS options. Maintain a resolv.conf for the sandbox.

	// TODO(random-liu): [P0] Add NamespaceGetter and PortMappingGetter to initialize network plugin.

	// TODO(random-liu): [P0] Add annotation to identify the container is managed by cri-containerd.
	// TODO(random-liu): [P2] Consider whether to add labels and annotations to the container.

	// Set cgroups parent.
	if config.GetLinux().GetCgroupParent() != "" {
		cgroupsPath := getCgroupsPath(config.GetLinux().GetCgroupParent(), id)
		g.SetLinuxCgroupsPath(cgroupsPath)
	}
	// When cgroup parent is not set, containerd-shim will create container in a child cgroup
	// of the cgroup itself is in.
	// TODO(random-liu): [P2] Set default cgroup path if cgroup parent is not specified.

	// Set namespace options.
	nsOptions := config.GetLinux().GetSecurityContext().GetNamespaceOptions()
	// TODO(random-liu): [P1] Create permanent network namespace, so that we could still cleanup
	// network namespace after sandbox container dies unexpectedly.
	// By default, all namespaces are enabled for the container, runc will create a new namespace
	// for it. By removing the namespace, the container will inherit the namespace of the runtime.
	if nsOptions.GetHostNetwork() {
		g.RemoveLinuxNamespace(string(runtimespec.NetworkNamespace)) // nolint: errcheck
		// TODO(random-liu): [P1] Figure out how to handle UTS namespace.
	}

	if nsOptions.GetHostPid() {
		g.RemoveLinuxNamespace(string(runtimespec.PIDNamespace)) // nolint: errcheck
	}

	// TODO(random-liu): [P0] Deal with /dev/shm. Use host for HostIpc, and create and mount for
	// non-HostIpc. What about mqueue?
	if nsOptions.GetHostIpc() {
		g.RemoveLinuxNamespace(string(runtimespec.IPCNamespace)) // nolint: errcheck
	}

	// TODO(random-liu): [P1] Apply SeLinux options.

	// TODO(random-liu): [P1] Set user.

	// TODO(random-liu): [P1] Set supplemental group.

	// TODO(random-liu): [P1] Set privileged.

	// TODO(random-liu): [P2] Set sysctl from annotations.

	// TODO(random-liu): [P2] Set apparmor and seccomp from annotations.

	// TODO(random-liu): [P1] Set default sandbox container resource limit.

	return g.Spec()
}
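Reviewer note: the spec generation above can be exercised outside the service. The following is a minimal standalone sketch, not part of this PR, that drives the same runtime-tools generator calls used by generateSandboxContainerSpec; the hostname, cgroups path and the printed output are purely illustrative.

package main

import (
	"encoding/json"
	"fmt"

	runtimespec "github.com/opencontainers/runtime-spec/specs-go"
	"github.com/opencontainers/runtime-tools/generate"
)

func main() {
	g := generate.New()
	// Same generator calls the sandbox spec generation relies on.
	g.SetRootPath("rootfs")
	g.SetProcessArgs([]string{"sh", "-c", "while true; do sleep 1000000000; done"})
	g.SetRootReadonly(true)
	g.SetHostname("example-hostname")               // illustrative value
	g.SetLinuxCgroupsPath("/example/parent/example-id") // illustrative value
	// Dropping the network namespace makes the container share the
	// runtime's network namespace, as in the host-network case above.
	g.RemoveLinuxNamespace(string(runtimespec.NetworkNamespace)) // nolint: errcheck

	data, err := json.Marshal(g.Spec())
	if err != nil {
		panic(err)
	}
	fmt.Println(string(data))
}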
pkg/server/sandbox_run_test.go | 191 (new file)
@@ -0,0 +1,191 @@
/*
Copyright 2017 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package server

import (
	"encoding/json"
	"io"
	"os"
	"syscall"
	"testing"

	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
	"golang.org/x/net/context"

	"github.com/containerd/containerd/api/services/execution"
	runtimespec "github.com/opencontainers/runtime-spec/specs-go"

	ostesting "github.com/kubernetes-incubator/cri-containerd/pkg/os/testing"
	servertesting "github.com/kubernetes-incubator/cri-containerd/pkg/server/testing"

	"k8s.io/kubernetes/pkg/kubelet/api/v1alpha1/runtime"
)

func getRunPodSandboxTestData() (*runtime.PodSandboxConfig, func(*testing.T, string, *runtimespec.Spec)) {
	config := &runtime.PodSandboxConfig{
		Metadata: &runtime.PodSandboxMetadata{
			Name:      "test-name",
			Uid:       "test-uid",
			Namespace: "test-ns",
			Attempt:   1,
		},
		Hostname:     "test-hostname",
		LogDirectory: "test-log-directory",
		Labels:       map[string]string{"a": "b"},
		Annotations:  map[string]string{"c": "d"},
		Linux: &runtime.LinuxPodSandboxConfig{
			CgroupParent: "/test/cgroup/parent",
		},
	}
	specCheck := func(t *testing.T, id string, spec *runtimespec.Spec) {
		assert.Equal(t, "test-hostname", spec.Hostname)
		assert.Equal(t, getCgroupsPath("/test/cgroup/parent", id), spec.Linux.CgroupsPath)
		assert.Equal(t, relativeRootfsPath, spec.Root.Path)
		assert.Equal(t, true, spec.Root.Readonly)
	}
	return config, specCheck
}

func TestGenerateSandboxContainerSpec(t *testing.T) {
	testID := "test-id"
	for desc, test := range map[string]struct {
		configChange func(*runtime.PodSandboxConfig)
		specCheck    func(*testing.T, *runtimespec.Spec)
	}{
		"spec should reflect original config": {
			specCheck: func(t *testing.T, spec *runtimespec.Spec) {
				// runtime spec should have expected namespaces enabled by default.
				require.NotNil(t, spec.Linux)
				assert.Contains(t, spec.Linux.Namespaces, runtimespec.LinuxNamespace{
					Type: runtimespec.NetworkNamespace,
				})
				assert.Contains(t, spec.Linux.Namespaces, runtimespec.LinuxNamespace{
					Type: runtimespec.PIDNamespace,
				})
				assert.Contains(t, spec.Linux.Namespaces, runtimespec.LinuxNamespace{
					Type: runtimespec.IPCNamespace,
				})
			},
		},
		"host namespace": {
			configChange: func(c *runtime.PodSandboxConfig) {
				c.Linux.SecurityContext = &runtime.LinuxSandboxSecurityContext{
					NamespaceOptions: &runtime.NamespaceOption{
						HostNetwork: true,
						HostPid:     true,
						HostIpc:     true,
					},
				}
			},
			specCheck: func(t *testing.T, spec *runtimespec.Spec) {
				// runtime spec should disable expected namespaces in host mode.
				require.NotNil(t, spec.Linux)
				assert.NotContains(t, spec.Linux.Namespaces, runtimespec.LinuxNamespace{
					Type: runtimespec.NetworkNamespace,
				})
				assert.NotContains(t, spec.Linux.Namespaces, runtimespec.LinuxNamespace{
					Type: runtimespec.PIDNamespace,
				})
				assert.NotContains(t, spec.Linux.Namespaces, runtimespec.LinuxNamespace{
					Type: runtimespec.IPCNamespace,
				})
			},
		},
	} {
		t.Logf("TestCase %q", desc)
		c := newTestCRIContainerdService()
		config, specCheck := getRunPodSandboxTestData()
		if test.configChange != nil {
			test.configChange(config)
		}
		spec := c.generateSandboxContainerSpec(testID, config)
		specCheck(t, testID, spec)
		if test.specCheck != nil {
			test.specCheck(t, spec)
		}
	}
}

func TestRunPodSandbox(t *testing.T) {
	config, specCheck := getRunPodSandboxTestData()
	c := newTestCRIContainerdService()
	fake := c.containerService.(*servertesting.FakeExecutionClient)
	fakeOS := c.os.(*ostesting.FakeOS)
	var dirs []string
	var pipes []string
	fakeOS.MkdirAllFn = func(path string, perm os.FileMode) error {
		dirs = append(dirs, path)
		assert.Equal(t, os.FileMode(0755), perm)
		return nil
	}
	fakeOS.OpenFifoFn = func(ctx context.Context, fn string, flag int, perm os.FileMode) (io.ReadWriteCloser, error) {
		pipes = append(pipes, fn)
		assert.Equal(t, syscall.O_RDONLY|syscall.O_CREAT|syscall.O_NONBLOCK, flag)
		assert.Equal(t, os.FileMode(0700), perm)
		return nopReadWriteCloser{}, nil
	}
	expectCalls := []string{"create", "start"}

	res, err := c.RunPodSandbox(context.Background(), &runtime.RunPodSandboxRequest{Config: config})
	assert.NoError(t, err)
	require.NotNil(t, res)
	id := res.GetPodSandboxId()

	assert.Len(t, dirs, 1)
	assert.Equal(t, getSandboxRootDir(c.rootDir, id), dirs[0], "sandbox root directory should be created")

	assert.Len(t, pipes, 2)
	_, stdout, stderr := getStreamingPipes(getSandboxRootDir(c.rootDir, id))
	assert.Contains(t, pipes, stdout, "sandbox stdout pipe should be created")
	assert.Contains(t, pipes, stderr, "sandbox stderr pipe should be created")

	assert.Equal(t, expectCalls, fake.GetCalledNames(), "expect containerd functions should be called")
	calls := fake.GetCalledDetails()
	createOpts := calls[0].Argument.(*execution.CreateRequest)
	assert.Equal(t, id, createOpts.ID, "create id should be correct")
	// TODO(random-liu): Test rootfs mount when image management part is integrated.
	assert.Equal(t, stdout, createOpts.Stdout, "stdout pipe should be passed to containerd")
	assert.Equal(t, stderr, createOpts.Stderr, "stderr pipe should be passed to containerd")
	spec := &runtimespec.Spec{}
	assert.NoError(t, json.Unmarshal(createOpts.Spec.Value, spec))
	t.Logf("oci spec check")
	specCheck(t, id, spec)

	startID := calls[1].Argument.(*execution.StartRequest).ID
	assert.Equal(t, id, startID, "start id should be correct")

	meta, err := c.sandboxStore.Get(id)
	assert.NoError(t, err)
	assert.Equal(t, id, meta.ID, "metadata id should be correct")
	err = c.sandboxNameIndex.Reserve(meta.Name, "random-id")
	assert.Error(t, err, "metadata name should be reserved")
	assert.Equal(t, config, meta.Config, "metadata config should be correct")
	// TODO(random-liu): [P2] Add clock interface and use fake clock.
	assert.NotZero(t, meta.CreatedAt, "metadata CreatedAt should be set")
	info, err := fake.Info(context.Background(), &execution.InfoRequest{ID: id})
	assert.NoError(t, err)
	pid := info.Pid
	assert.Equal(t, meta.NetNS, getNetworkNamespace(pid), "metadata network namespace should be correct")

	gotID, err := c.sandboxIDIndex.Get(id)
	assert.NoError(t, err)
	assert.Equal(t, id, gotID, "sandbox id should be indexed")
}

// TODO(random-liu): [P1] Add unit test for different error cases to make sure
// the function cleans up on error properly.
pkg/server/sandbox_status.go
@@ -17,14 +17,85 @@ limitations under the License.
 package server
 
 import (
-	"errors"
+	"fmt"
+
+	"github.com/golang/glog"
 	"golang.org/x/net/context"
 
+	"github.com/containerd/containerd/api/services/execution"
+	"github.com/containerd/containerd/api/types/container"
+
 	"k8s.io/kubernetes/pkg/kubelet/api/v1alpha1/runtime"
+
+	"github.com/kubernetes-incubator/cri-containerd/pkg/metadata"
 )
 
 // PodSandboxStatus returns the status of the PodSandbox.
-func (c *criContainerdService) PodSandboxStatus(ctx context.Context, r *runtime.PodSandboxStatusRequest) (*runtime.PodSandboxStatusResponse, error) {
-	return nil, errors.New("not implemented")
+func (c *criContainerdService) PodSandboxStatus(ctx context.Context, r *runtime.PodSandboxStatusRequest) (retRes *runtime.PodSandboxStatusResponse, retErr error) {
+	glog.V(4).Infof("PodSandboxStatus for sandbox %q", r.GetPodSandboxId())
+	defer func() {
+		if retErr == nil {
+			glog.V(4).Infof("PodSandboxStatus returns status %+v", retRes.GetStatus())
+		}
+	}()
+
+	sandbox, err := c.getSandbox(r.GetPodSandboxId())
+	if err != nil {
+		return nil, fmt.Errorf("failed to find sandbox %q: %v", r.GetPodSandboxId(), err)
+	}
+	if sandbox == nil {
+		return nil, fmt.Errorf("sandbox %q does not exist", r.GetPodSandboxId())
+	}
+	// Use the full sandbox id.
+	id := sandbox.ID
+
+	info, err := c.containerService.Info(ctx, &execution.InfoRequest{ID: id})
+	if err != nil && !isContainerdContainerNotExistError(err) {
+		return nil, fmt.Errorf("failed to get sandbox container info for %q: %v", id, err)
+	}
+
+	// Set sandbox state to NOTREADY by default.
+	state := runtime.PodSandboxState_SANDBOX_NOTREADY
+	// If the sandbox container is running, treat it as READY.
+	if info != nil && info.Status == container.Status_RUNNING {
+		state = runtime.PodSandboxState_SANDBOX_READY
+	}
+
+	return &runtime.PodSandboxStatusResponse{Status: toCRISandboxStatus(sandbox, state)}, nil
+}
+
+// toCRISandboxStatus converts sandbox metadata into CRI pod sandbox status.
+func toCRISandboxStatus(meta *metadata.SandboxMetadata, state runtime.PodSandboxState) *runtime.PodSandboxStatus {
+	nsOpts := meta.Config.GetLinux().GetSecurityContext().GetNamespaceOptions()
+	netNS := meta.NetNS
+	if state == runtime.PodSandboxState_SANDBOX_NOTREADY {
+		// Return empty network namespace when sandbox is not ready.
+		// For kubenet, when sandbox is not running, both empty
+		// network namespace and a valid permanent network namespace
+		// work. Go with the first option here because it's the current
+		// behavior in Kubernetes.
+		netNS = ""
+	}
+	return &runtime.PodSandboxStatus{
+		Id:        meta.ID,
+		Metadata:  meta.Config.GetMetadata(),
+		State:     state,
+		CreatedAt: meta.CreatedAt,
+		// TODO(random-liu): [P0] Get sandbox ip from network plugin.
+		Network: &runtime.PodSandboxNetworkStatus{},
+		Linux: &runtime.LinuxPodSandboxStatus{
+			Namespaces: &runtime.Namespace{
+				// TODO(random-liu): Revendor new CRI version and get
+				// rid of this field.
+				Network: netNS,
+				Options: &runtime.NamespaceOption{
+					HostNetwork: nsOpts.GetHostNetwork(),
+					HostPid:     nsOpts.GetHostPid(),
+					HostIpc:     nsOpts.GetHostIpc(),
+				},
+			},
+		},
+		Labels:      meta.Config.GetLabels(),
+		Annotations: meta.Config.GetAnnotations(),
+	}
 }
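Reviewer note: getSandbox and getNetworkNamespace are defined elsewhere in this PR and do not appear in this hunk. As a rough idea of the latter, the network namespace recorded in the metadata is the one the kernel exposes for the sandbox container's init process; the sketch below is an assumption about its shape, not a quote of the actual helper.

// Hypothetical sketch (the real helper lives in another file of this PR).
// Assumes "fmt" is imported in package server.
func getNetworkNamespace(pid uint32) string {
	// The network namespace of a process is exposed at /proc/<pid>/ns/net.
	return fmt.Sprintf("/proc/%v/ns/net", pid)
}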
pkg/server/sandbox_status_test.go | 192 (new file)
@@ -0,0 +1,192 @@
/*
Copyright 2017 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package server

import (
	"errors"
	"testing"
	"time"

	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
	"golang.org/x/net/context"

	"github.com/containerd/containerd/api/types/container"

	"k8s.io/kubernetes/pkg/kubelet/api/v1alpha1/runtime"

	"github.com/kubernetes-incubator/cri-containerd/pkg/metadata"
	servertesting "github.com/kubernetes-incubator/cri-containerd/pkg/server/testing"
)

// Variables used in the following test.

const sandboxStatusTestID = "test-id"

func getSandboxStatusTestData() (*metadata.SandboxMetadata, *runtime.PodSandboxStatus) {
	config := &runtime.PodSandboxConfig{
		Metadata: &runtime.PodSandboxMetadata{
			Name:      "test-name",
			Uid:       "test-uid",
			Namespace: "test-ns",
			Attempt:   1,
		},
		Linux: &runtime.LinuxPodSandboxConfig{
			SecurityContext: &runtime.LinuxSandboxSecurityContext{
				NamespaceOptions: &runtime.NamespaceOption{
					HostNetwork: true,
					HostPid:     false,
					HostIpc:     true,
				},
			},
		},
		Labels:      map[string]string{"a": "b"},
		Annotations: map[string]string{"c": "d"},
	}

	createdAt := time.Now().UnixNano()

	metadata := &metadata.SandboxMetadata{
		ID:        sandboxStatusTestID,
		Name:      "test-name",
		Config:    config,
		CreatedAt: createdAt,
	}

	expectedStatus := &runtime.PodSandboxStatus{
		Id:        sandboxStatusTestID,
		Metadata:  config.GetMetadata(),
		CreatedAt: createdAt,
		Network:   &runtime.PodSandboxNetworkStatus{},
		Linux: &runtime.LinuxPodSandboxStatus{
			Namespaces: &runtime.Namespace{
				Options: &runtime.NamespaceOption{
					HostNetwork: true,
					HostPid:     false,
					HostIpc:     true,
				},
			},
		},
		Labels:      config.GetLabels(),
		Annotations: config.GetAnnotations(),
	}

	return metadata, expectedStatus
}

func TestToCRISandboxStatus(t *testing.T) {
	for desc, test := range map[string]struct {
		state       runtime.PodSandboxState
		expectNetNS string
	}{
		"ready sandbox should have network namespace": {
			state:       runtime.PodSandboxState_SANDBOX_READY,
			expectNetNS: "test-netns",
		},
		"not ready sandbox should not have network namespace": {
			state:       runtime.PodSandboxState_SANDBOX_NOTREADY,
			expectNetNS: "",
		},
	} {
		metadata, expect := getSandboxStatusTestData()
		metadata.NetNS = "test-netns"
		status := toCRISandboxStatus(metadata, test.state)
		expect.Linux.Namespaces.Network = test.expectNetNS
		expect.State = test.state
		assert.Equal(t, expect, status, desc)
	}
}

func TestPodSandboxStatus(t *testing.T) {
	for desc, test := range map[string]struct {
		sandboxContainers []container.Container
		injectMetadata    bool
		injectErr         error
		expectState       runtime.PodSandboxState
		expectErr         bool
		expectCalls       []string
	}{
		"sandbox status without metadata": {
			injectMetadata: false,
			expectErr:      true,
			expectCalls:    []string{},
		},
		"sandbox status with running sandbox container": {
			sandboxContainers: []container.Container{{
				ID:     sandboxStatusTestID,
				Pid:    1,
				Status: container.Status_RUNNING,
			}},
			injectMetadata: true,
			expectState:    runtime.PodSandboxState_SANDBOX_READY,
			expectCalls:    []string{"info"},
		},
		"sandbox status with stopped sandbox container": {
			sandboxContainers: []container.Container{{
				ID:     sandboxStatusTestID,
				Pid:    1,
				Status: container.Status_STOPPED,
			}},
			injectMetadata: true,
			expectState:    runtime.PodSandboxState_SANDBOX_NOTREADY,
			expectCalls:    []string{"info"},
		},
		"sandbox status with non-existing sandbox container": {
			sandboxContainers: []container.Container{},
			injectMetadata:    true,
			expectState:       runtime.PodSandboxState_SANDBOX_NOTREADY,
			expectCalls:       []string{"info"},
		},
		"sandbox status with arbitrary error": {
			sandboxContainers: []container.Container{{
				ID:     sandboxStatusTestID,
				Pid:    1,
				Status: container.Status_RUNNING,
			}},
			injectMetadata: true,
			injectErr:      errors.New("arbitrary error"),
			expectErr:      true,
			expectCalls:    []string{"info"},
		},
	} {
		t.Logf("TestCase %q", desc)
		metadata, expect := getSandboxStatusTestData()
		c := newTestCRIContainerdService()
		fake := c.containerService.(*servertesting.FakeExecutionClient)
		fake.SetFakeContainers(test.sandboxContainers)
		if test.injectMetadata {
			assert.NoError(t, c.sandboxIDIndex.Add(metadata.ID))
			assert.NoError(t, c.sandboxStore.Create(*metadata))
		}
		if test.injectErr != nil {
			fake.InjectError("info", test.injectErr)
		}
		res, err := c.PodSandboxStatus(context.Background(), &runtime.PodSandboxStatusRequest{
			PodSandboxId: sandboxStatusTestID,
		})
		assert.Equal(t, test.expectCalls, fake.GetCalledNames())
		if test.expectErr {
			assert.Error(t, err)
			assert.Nil(t, res)
			continue
		}
		assert.NoError(t, err)
		require.NotNil(t, res)
		expect.State = test.expectState
		assert.Equal(t, expect, res.GetStatus())
	}
}
pkg/server/sandbox_stop.go
@@ -17,15 +17,44 @@ limitations under the License.
 package server
 
 import (
-	"errors"
+	"fmt"
+
+	"github.com/golang/glog"
 	"golang.org/x/net/context"
 
+	"github.com/containerd/containerd/api/services/execution"
+
 	"k8s.io/kubernetes/pkg/kubelet/api/v1alpha1/runtime"
 )
 
 // StopPodSandbox stops the sandbox. If there are any running containers in the
 // sandbox, they should be forcibly terminated.
-func (c *criContainerdService) StopPodSandbox(ctx context.Context, r *runtime.StopPodSandboxRequest) (*runtime.StopPodSandboxResponse, error) {
-	return nil, errors.New("not implemented")
+func (c *criContainerdService) StopPodSandbox(ctx context.Context, r *runtime.StopPodSandboxRequest) (retRes *runtime.StopPodSandboxResponse, retErr error) {
+	glog.V(2).Infof("StopPodSandbox for sandbox %q", r.GetPodSandboxId())
+	defer func() {
+		if retErr == nil {
+			glog.V(2).Info("StopPodSandbox returns successfully")
+		}
+	}()
+
+	sandbox, err := c.getSandbox(r.GetPodSandboxId())
+	if err != nil {
+		return nil, fmt.Errorf("failed to find sandbox %q: %v", r.GetPodSandboxId(), err)
+	}
+	if sandbox == nil {
+		return nil, fmt.Errorf("sandbox %q does not exist", r.GetPodSandboxId())
+	}
+	// Use the full sandbox id.
+	id := sandbox.ID
+
+	// TODO(random-liu): [P1] Handle sandbox container graceful deletion.
+	// Delete the sandbox container from containerd.
+	_, err = c.containerService.Delete(ctx, &execution.DeleteRequest{ID: id})
+	if err != nil && !isContainerdContainerNotExistError(err) {
+		return nil, fmt.Errorf("failed to delete sandbox container %q: %v", id, err)
+	}
+
+	// TODO(random-liu): [P0] Call network plugin to teardown network.
+	// TODO(random-liu): [P2] Stop all containers inside the sandbox.
+	return &runtime.StopPodSandboxResponse{}, nil
 }
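Reviewer note: isContainerdContainerNotExistError is also defined outside this hunk. Judging from the test below, which injects grpc.Errorf(codes.Unknown, containerd.ErrContainerNotExist.Error()), it presumably compares the gRPC error description with containerd's sentinel error. A hedged sketch, assuming imports of "google.golang.org/grpc" and "github.com/containerd/containerd":

// Hypothetical sketch (the real helper lives in another file of this PR):
// treat a gRPC error whose description matches containerd's "container does
// not exist" error as a tolerable not-found condition.
func isContainerdContainerNotExistError(err error) bool {
	return grpc.ErrorDesc(err) == containerd.ErrContainerNotExist.Error()
}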
pkg/server/sandbox_stop_test.go | 106 (new file)
@@ -0,0 +1,106 @@
/*
Copyright 2017 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package server

import (
	"testing"

	"github.com/stretchr/testify/assert"
	"golang.org/x/net/context"
	"google.golang.org/grpc"
	"google.golang.org/grpc/codes"

	"github.com/containerd/containerd"
	"github.com/containerd/containerd/api/types/container"

	"k8s.io/kubernetes/pkg/kubelet/api/v1alpha1/runtime"

	"github.com/kubernetes-incubator/cri-containerd/pkg/metadata"
	servertesting "github.com/kubernetes-incubator/cri-containerd/pkg/server/testing"
)

func TestStopPodSandbox(t *testing.T) {
	testID := "test-id"
	testSandbox := metadata.SandboxMetadata{
		ID:   testID,
		Name: "test-name",
	}
	testContainer := container.Container{
		ID:     testID,
		Pid:    1,
		Status: container.Status_RUNNING,
	}

	for desc, test := range map[string]struct {
		sandboxContainers []container.Container
		injectSandbox     bool
		injectErr         error
		expectErr         bool
		expectCalls       []string
	}{
		"stop non-existing sandbox": {
			injectSandbox: false,
			expectErr:     true,
			expectCalls:   []string{},
		},
		"stop sandbox with sandbox container": {
			sandboxContainers: []container.Container{testContainer},
			injectSandbox:     true,
			expectErr:         false,
			expectCalls:       []string{"delete"},
		},
		"stop sandbox with sandbox container not exist error": {
			sandboxContainers: []container.Container{},
			injectSandbox:     true,
			// Inject error to make sure fake execution client returns error.
			injectErr:   grpc.Errorf(codes.Unknown, containerd.ErrContainerNotExist.Error()),
			expectErr:   false,
			expectCalls: []string{"delete"},
		},
		"stop sandbox with arbitrary error": {
			injectSandbox: true,
			injectErr:     grpc.Errorf(codes.Unknown, "arbitrary error"),
			expectErr:     true,
			expectCalls:   []string{"delete"},
		},
	} {
		t.Logf("TestCase %q", desc)
		c := newTestCRIContainerdService()
		fake := c.containerService.(*servertesting.FakeExecutionClient)
		fake.SetFakeContainers(test.sandboxContainers)

		if test.injectSandbox {
			assert.NoError(t, c.sandboxStore.Create(testSandbox))
			c.sandboxIDIndex.Add(testID)
		}
		if test.injectErr != nil {
			fake.InjectError("delete", test.injectErr)
		}

		res, err := c.StopPodSandbox(context.Background(), &runtime.StopPodSandboxRequest{
			PodSandboxId: testID,
		})
		if test.expectErr {
			assert.Error(t, err)
			assert.Nil(t, res)
		} else {
			assert.NoError(t, err)
			assert.NotNil(t, res)
		}
		assert.Equal(t, test.expectCalls, fake.GetCalledNames())
	}
}
pkg/server/service.go
@@ -17,6 +17,7 @@ limitations under the License.
 package server
 
 import (
+	"github.com/docker/docker/pkg/truncindex"
 	"google.golang.org/grpc"
 
 	contentapi "github.com/containerd/containerd/api/services/content"
@@ -29,13 +30,20 @@ import (
 	contentservice "github.com/containerd/containerd/services/content"
 	imagesservice "github.com/containerd/containerd/services/images"
 	rootfsservice "github.com/containerd/containerd/services/rootfs"
 
 	"github.com/kubernetes-incubator/cri-containerd/pkg/metadata"
 	"github.com/kubernetes-incubator/cri-containerd/pkg/metadata/store"
+	osinterface "github.com/kubernetes-incubator/cri-containerd/pkg/os"
+	"github.com/kubernetes-incubator/cri-containerd/pkg/registrar"
 
 	"k8s.io/kubernetes/pkg/kubelet/api/v1alpha1/runtime"
 )
 
 // TODO remove the underscores from the following imports as the services are
 // implemented. "_" is being used to hold the reference to keep autocomplete
 // from deleting them until referenced below.
+// nolint: golint
 import (
 	_ "github.com/containerd/containerd/api/types/container"
 	_ "github.com/containerd/containerd/api/types/descriptor"
 	_ "github.com/containerd/containerd/api/types/mount"
@@ -51,23 +59,53 @@ type CRIContainerdService interface {
 
 // criContainerdService implements CRIContainerdService.
 type criContainerdService struct {
-	containerService execution.ContainerServiceClient
-	imageStore images.Store
-	contentIngester content.Ingester
-	contentProvider content.Provider
-	rootfsUnpacker rootfs.Unpacker
+	// os is an interface for all required os operations.
+	os osinterface.OS
+	// rootDir is the directory for managing cri-containerd files.
+	rootDir string
+	// sandboxStore stores all sandbox metadata.
+	sandboxStore metadata.SandboxStore
+	// imageMetadataStore stores all image metadata.
 	imageMetadataStore metadata.ImageMetadataStore
+	// sandboxNameIndex stores all sandbox names and make sure each name
+	// is unique.
+	sandboxNameIndex *registrar.Registrar
+	// sandboxIDIndex is trie tree for truncated id indexing, e.g. after an
+	// id "abcdefg" is added, we could use "abcd" to identify the same thing
+	// as long as there is no ambiguity.
+	sandboxIDIndex *truncindex.TruncIndex
+	// containerService is containerd container service client.
+	containerService execution.ContainerServiceClient
+	// contentIngester is the containerd service to ingest content into
+	// content store.
+	contentIngester content.Ingester
+	// contentProvider is the containerd service to get content from
+	// content store.
+	contentProvider content.Provider
+	// rootfsUnpacker is the containerd service to unpack image content
+	// into snapshots.
+	rootfsUnpacker rootfs.Unpacker
+	// imageStoreService is the containerd service to store and track
+	// image metadata.
+	imageStoreService images.Store
 }
 
 // NewCRIContainerdService returns a new instance of CRIContainerdService
-func NewCRIContainerdService(conn *grpc.ClientConn) CRIContainerdService {
+func NewCRIContainerdService(conn *grpc.ClientConn, rootDir string) CRIContainerdService {
 	// TODO: Initialize different containerd clients.
+	// TODO(random-liu): [P2] Recover from runtime state and metadata store.
 	return &criContainerdService{
+		os:                 osinterface.RealOS{},
+		rootDir:            rootDir,
+		sandboxStore:       metadata.NewSandboxStore(store.NewMetadataStore()),
+		imageMetadataStore: metadata.NewImageMetadataStore(store.NewMetadataStore()),
+		// TODO(random-liu): Register sandbox id/name for recovered sandbox.
+		sandboxNameIndex: registrar.NewRegistrar(),
+		sandboxIDIndex:   truncindex.NewTruncIndex(nil),
 		containerService: execution.NewContainerServiceClient(conn),
-		imageStore: imagesservice.NewStoreFromClient(imagesapi.NewImagesClient(conn)),
+		imageStoreService: imagesservice.NewStoreFromClient(imagesapi.NewImagesClient(conn)),
 		contentIngester: contentservice.NewIngesterFromClient(contentapi.NewContentClient(conn)),
 		contentProvider: contentservice.NewProviderFromClient(contentapi.NewContentClient(conn)),
 		rootfsUnpacker: rootfsservice.NewUnpackerFromClient(rootfsapi.NewRootFSClient(conn)),
-		imageMetadataStore: metadata.NewImageMetadataStore(store.NewMetadataStore()),
 	}
 }
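Reviewer note: the extra rootDir argument means callers now pass a state directory when constructing the service. A rough wiring sketch follows; the socket path, timeout and root directory are made-up values, and the real wiring lives in cmd/cri-containerd and is not shown in this diff.

package main

import (
	"net"
	"time"

	"github.com/golang/glog"
	"google.golang.org/grpc"

	"github.com/kubernetes-incubator/cri-containerd/pkg/server"
)

func main() {
	// Dial containerd over its unix socket (path and timeout are illustrative).
	conn, err := grpc.Dial("/var/run/containerd/containerd.sock",
		grpc.WithInsecure(),
		grpc.WithTimeout(10*time.Second),
		grpc.WithDialer(func(addr string, timeout time.Duration) (net.Conn, error) {
			return net.DialTimeout("unix", addr, timeout)
		}))
	if err != nil {
		glog.Fatalf("Failed to connect containerd: %v", err)
	}
	// Root directory for cri-containerd state (illustrative value).
	svc := server.NewCRIContainerdService(conn, "/var/lib/cri-containerd")
	_ = svc // In the real daemon this is registered with a CRI gRPC server.
}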
pkg/server/service_test.go | 162 (new file)
@@ -0,0 +1,162 @@
/*
Copyright 2017 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package server

import (
	"io"
	"os"
	"testing"

	"github.com/docker/docker/pkg/truncindex"
	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
	"golang.org/x/net/context"

	"github.com/kubernetes-incubator/cri-containerd/pkg/metadata"
	"github.com/kubernetes-incubator/cri-containerd/pkg/metadata/store"
	ostesting "github.com/kubernetes-incubator/cri-containerd/pkg/os/testing"
	"github.com/kubernetes-incubator/cri-containerd/pkg/registrar"
	servertesting "github.com/kubernetes-incubator/cri-containerd/pkg/server/testing"

	"github.com/containerd/containerd/api/services/execution"

	"k8s.io/kubernetes/pkg/kubelet/api/v1alpha1/runtime"
)

type nopReadWriteCloser struct{}

func (nopReadWriteCloser) Read(p []byte) (n int, err error)  { return len(p), nil }
func (nopReadWriteCloser) Write(p []byte) (n int, err error) { return len(p), nil }
func (nopReadWriteCloser) Close() error                      { return nil }

const testRootDir = "/test/rootfs"

// newTestCRIContainerdService creates a fake criContainerdService for test.
func newTestCRIContainerdService() *criContainerdService {
	return &criContainerdService{
		os:               ostesting.NewFakeOS(),
		rootDir:          testRootDir,
		containerService: servertesting.NewFakeExecutionClient(),
		sandboxStore:     metadata.NewSandboxStore(store.NewMetadataStore()),
		sandboxNameIndex: registrar.NewRegistrar(),
		sandboxIDIndex:   truncindex.NewTruncIndex(nil),
	}
}

// Test all sandbox operations.
func TestSandboxOperations(t *testing.T) {
	c := newTestCRIContainerdService()
	fake := c.containerService.(*servertesting.FakeExecutionClient)
	fakeOS := c.os.(*ostesting.FakeOS)
	fakeOS.OpenFifoFn = func(ctx context.Context, fn string, flag int, perm os.FileMode) (io.ReadWriteCloser, error) {
		return nopReadWriteCloser{}, nil
	}
	config := &runtime.PodSandboxConfig{
		Metadata: &runtime.PodSandboxMetadata{
			Name:      "test-name",
			Uid:       "test-uid",
			Namespace: "test-ns",
			Attempt:   1,
		},
		Hostname:     "test-hostname",
		LogDirectory: "test-log-directory",
		Labels:       map[string]string{"a": "b"},
		Annotations:  map[string]string{"c": "d"},
	}

	t.Logf("should be able to run a pod sandbox")
	runRes, err := c.RunPodSandbox(context.Background(), &runtime.RunPodSandboxRequest{Config: config})
	assert.NoError(t, err)
	require.NotNil(t, runRes)
	id := runRes.GetPodSandboxId()

	t.Logf("should be able to get pod sandbox status")
	info, err := fake.Info(context.Background(), &execution.InfoRequest{ID: id})
	assert.NoError(t, err)
	expectSandboxStatus := &runtime.PodSandboxStatus{
		Id:       id,
		Metadata: config.GetMetadata(),
		// TODO(random-liu): [P2] Use fake clock for CreatedAt.
		Network: &runtime.PodSandboxNetworkStatus{},
		Linux: &runtime.LinuxPodSandboxStatus{
			Namespaces: &runtime.Namespace{
				Network: getNetworkNamespace(info.Pid),
				Options: &runtime.NamespaceOption{
					HostNetwork: false,
					HostPid:     false,
					HostIpc:     false,
				},
			},
		},
		Labels:      config.GetLabels(),
		Annotations: config.GetAnnotations(),
	}
	statusRes, err := c.PodSandboxStatus(context.Background(), &runtime.PodSandboxStatusRequest{PodSandboxId: id})
	assert.NoError(t, err)
	require.NotNil(t, statusRes)
	status := statusRes.GetStatus()
	expectSandboxStatus.CreatedAt = status.GetCreatedAt()
	assert.Equal(t, expectSandboxStatus, status)

	t.Logf("should be able to list pod sandboxes")
	expectSandbox := &runtime.PodSandbox{
		Id:          id,
		Metadata:    config.GetMetadata(),
		State:       runtime.PodSandboxState_SANDBOX_READY,
		Labels:      config.GetLabels(),
		Annotations: config.GetAnnotations(),
	}
	listRes, err := c.ListPodSandbox(context.Background(), &runtime.ListPodSandboxRequest{})
	assert.NoError(t, err)
	require.NotNil(t, listRes)
	sandboxes := listRes.GetItems()
	assert.Len(t, sandboxes, 1)
	expectSandbox.CreatedAt = sandboxes[0].CreatedAt
	assert.Equal(t, expectSandbox, sandboxes[0])

	t.Logf("should be able to stop a pod sandbox")
	stopRes, err := c.StopPodSandbox(context.Background(), &runtime.StopPodSandboxRequest{PodSandboxId: id})
	assert.NoError(t, err)
	require.NotNil(t, stopRes)
	statusRes, err = c.PodSandboxStatus(context.Background(), &runtime.PodSandboxStatusRequest{PodSandboxId: id})
	assert.NoError(t, err)
	require.NotNil(t, statusRes)
	assert.Equal(t, runtime.PodSandboxState_SANDBOX_NOTREADY, statusRes.GetStatus().GetState(),
		"sandbox status should be NOTREADY after stopped")
	listRes, err = c.ListPodSandbox(context.Background(), &runtime.ListPodSandboxRequest{})
	assert.NoError(t, err)
	require.NotNil(t, listRes)
	assert.Len(t, listRes.GetItems(), 1)
	assert.Equal(t, runtime.PodSandboxState_SANDBOX_NOTREADY, listRes.GetItems()[0].State,
		"sandbox in list should be NOTREADY after stopped")

	t.Logf("should be able to remove a pod sandbox")
	removeRes, err := c.RemovePodSandbox(context.Background(), &runtime.RemovePodSandboxRequest{PodSandboxId: id})
	assert.NoError(t, err)
	require.NotNil(t, removeRes)
	_, err = c.PodSandboxStatus(context.Background(), &runtime.PodSandboxStatusRequest{PodSandboxId: id})
	assert.Error(t, err, "should not be able to get sandbox status after removed")
	listRes, err = c.ListPodSandbox(context.Background(), &runtime.ListPodSandboxRequest{})
	assert.NoError(t, err)
	require.NotNil(t, listRes)
	assert.Empty(t, listRes.GetItems(), "should not be able to list the sandbox after removed")

	t.Logf("should be able to create the sandbox again")
	runRes, err = c.RunPodSandbox(context.Background(), &runtime.RunPodSandboxRequest{Config: config})
	assert.NoError(t, err)
	require.NotNil(t, runRes)
}
vendor/github.com/docker/docker/AUTHORS | 1652 (generated, vendored, new file)
File diff suppressed because it is too large.
vendor/github.com/docker/docker/LICENSE | 191 (generated, vendored, new file)
@@ -0,0 +1,191 @@
[Standard Apache License, Version 2.0 text (January 2004, https://www.apache.org/licenses/), followed by the usual Apache 2.0 notice block for "Copyright 2013-2016 Docker, Inc."]
vendor/github.com/docker/docker/NOTICE | 19 (generated, vendored, new file)
@@ -0,0 +1,19 @@
Docker
Copyright 2012-2016 Docker, Inc.

This product includes software developed at Docker, Inc. (https://www.docker.com).

This product contains software (https://github.com/kr/pty) developed
by Keith Rarick, licensed under the MIT License.

The following is courtesy of our legal counsel:


Use and transfer of Docker may be subject to certain restrictions by the
United States and other governments.
It is your responsibility to ensure that your use and/or transfer does not
violate applicable laws.

For more information, please see https://www.bis.doc.gov

See also https://www.apache.org/dev/crypto.html and/or seek legal counsel.
71 vendor/github.com/docker/docker/pkg/random/random.go generated vendored Normal file
@@ -0,0 +1,71 @@
|
|||||||
|
package random
|
||||||
|
|
||||||
|
import (
|
||||||
|
cryptorand "crypto/rand"
|
||||||
|
"io"
|
||||||
|
"math"
|
||||||
|
"math/big"
|
||||||
|
"math/rand"
|
||||||
|
"sync"
|
||||||
|
"time"
|
||||||
|
)
|
||||||
|
|
||||||
|
// Rand is a global *rand.Rand instance, initialized with a NewSource() source.
|
||||||
|
var Rand = rand.New(NewSource())
|
||||||
|
|
||||||
|
// Reader is a global, shared instance of a pseudorandom bytes generator.
|
||||||
|
// It doesn't consume entropy.
|
||||||
|
var Reader io.Reader = &reader{rnd: Rand}
|
||||||
|
|
||||||
|
// copypaste from standard math/rand
|
||||||
|
type lockedSource struct {
|
||||||
|
lk sync.Mutex
|
||||||
|
src rand.Source
|
||||||
|
}
|
||||||
|
|
||||||
|
func (r *lockedSource) Int63() (n int64) {
|
||||||
|
r.lk.Lock()
|
||||||
|
n = r.src.Int63()
|
||||||
|
r.lk.Unlock()
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
func (r *lockedSource) Seed(seed int64) {
|
||||||
|
r.lk.Lock()
|
||||||
|
r.src.Seed(seed)
|
||||||
|
r.lk.Unlock()
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewSource returns math/rand.Source safe for concurrent use and initialized
|
||||||
|
// with current unix-nano timestamp
|
||||||
|
func NewSource() rand.Source {
|
||||||
|
var seed int64
|
||||||
|
if cryptoseed, err := cryptorand.Int(cryptorand.Reader, big.NewInt(math.MaxInt64)); err != nil {
|
||||||
|
// This should not happen, but worst-case fallback to time-based seed.
|
||||||
|
seed = time.Now().UnixNano()
|
||||||
|
} else {
|
||||||
|
seed = cryptoseed.Int64()
|
||||||
|
}
|
||||||
|
return &lockedSource{
|
||||||
|
src: rand.NewSource(seed),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
type reader struct {
|
||||||
|
rnd *rand.Rand
|
||||||
|
}
|
||||||
|
|
||||||
|
func (r *reader) Read(b []byte) (int, error) {
|
||||||
|
i := 0
|
||||||
|
for {
|
||||||
|
val := r.rnd.Int63()
|
||||||
|
for val > 0 {
|
||||||
|
b[i] = byte(val)
|
||||||
|
i++
|
||||||
|
if i == len(b) {
|
||||||
|
return i, nil
|
||||||
|
}
|
||||||
|
val >>= 8
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
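The random package above wires a mutex-guarded math/rand source behind a shared Reader so callers can draw pseudorandom bytes without consuming kernel entropy. A minimal usage sketch (the buffer size and printed output are illustrative, not part of the vendored code):

package main

import (
	"fmt"
	"io"

	"github.com/docker/docker/pkg/random"
)

func main() {
	// Fill a buffer from the shared pseudorandom Reader; it never blocks
	// on entropy because it is backed by math/rand, not crypto/rand.
	buf := make([]byte, 8)
	if _, err := io.ReadFull(random.Reader, buf); err != nil {
		panic(err)
	}
	fmt.Printf("pseudorandom bytes: %x\n", buf)

	// Rand is safe for concurrent use: its source is wrapped in lockedSource.
	fmt.Println("pseudorandom int63:", random.Rand.Int63())
}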
1 vendor/github.com/docker/docker/pkg/stringid/README.md generated vendored Normal file
@@ -0,0 +1 @@
This package provides helper functions for dealing with string identifiers
69 vendor/github.com/docker/docker/pkg/stringid/stringid.go generated vendored Normal file
@@ -0,0 +1,69 @@
|
|||||||
|
// Package stringid provides helper functions for dealing with string identifiers
|
||||||
|
package stringid
|
||||||
|
|
||||||
|
import (
|
||||||
|
"crypto/rand"
|
||||||
|
"encoding/hex"
|
||||||
|
"io"
|
||||||
|
"regexp"
|
||||||
|
"strconv"
|
||||||
|
"strings"
|
||||||
|
|
||||||
|
"github.com/docker/docker/pkg/random"
|
||||||
|
)
|
||||||
|
|
||||||
|
const shortLen = 12
|
||||||
|
|
||||||
|
var validShortID = regexp.MustCompile("^[a-z0-9]{12}$")
|
||||||
|
|
||||||
|
// IsShortID determines if an arbitrary string *looks like* a short ID.
|
||||||
|
func IsShortID(id string) bool {
|
||||||
|
return validShortID.MatchString(id)
|
||||||
|
}
|
||||||
|
|
||||||
|
// TruncateID returns a shorthand version of a string identifier for convenience.
|
||||||
|
// A collision with other shorthands is very unlikely, but possible.
|
||||||
|
// In case of a collision a lookup with TruncIndex.Get() will fail, and the caller
|
||||||
|
// will need to use a longer prefix, or the full-length Id.
|
||||||
|
func TruncateID(id string) string {
|
||||||
|
if i := strings.IndexRune(id, ':'); i >= 0 {
|
||||||
|
id = id[i+1:]
|
||||||
|
}
|
||||||
|
if len(id) > shortLen {
|
||||||
|
id = id[:shortLen]
|
||||||
|
}
|
||||||
|
return id
|
||||||
|
}
|
||||||
|
|
||||||
|
func generateID(crypto bool) string {
|
||||||
|
b := make([]byte, 32)
|
||||||
|
r := random.Reader
|
||||||
|
if crypto {
|
||||||
|
r = rand.Reader
|
||||||
|
}
|
||||||
|
for {
|
||||||
|
if _, err := io.ReadFull(r, b); err != nil {
|
||||||
|
panic(err) // This shouldn't happen
|
||||||
|
}
|
||||||
|
id := hex.EncodeToString(b)
|
||||||
|
// if we try to parse the truncated form as an int and we don't have
|
||||||
|
// an error then the value is all numeric and causes issues when
|
||||||
|
// used as a hostname. ref #3869
|
||||||
|
if _, err := strconv.ParseInt(TruncateID(id), 10, 64); err == nil {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
return id
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// GenerateRandomID returns a unique id.
|
||||||
|
func GenerateRandomID() string {
|
||||||
|
return generateID(true)
|
||||||
|
}
|
||||||
|
|
||||||
|
// GenerateNonCryptoID generates unique id without using cryptographically
|
||||||
|
// secure sources of random.
|
||||||
|
// It helps you to save entropy.
|
||||||
|
func GenerateNonCryptoID() string {
|
||||||
|
return generateID(false)
|
||||||
|
}
|
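stringid builds on the random package: GenerateRandomID draws from crypto/rand, GenerateNonCryptoID from the shared pseudorandom Reader, and both retry until the truncated ID is not purely numeric. A short usage sketch (printed values are illustrative):

package main

import (
	"fmt"

	"github.com/docker/docker/pkg/stringid"
)

func main() {
	// 64-character hex ID from a cryptographically secure source.
	id := stringid.GenerateRandomID()

	// 12-character shorthand of the full ID.
	short := stringid.TruncateID(id)

	fmt.Println(id, short, stringid.IsShortID(short)) // IsShortID(short) == true

	// Cheaper variant that does not consume kernel entropy.
	fmt.Println(stringid.GenerateNonCryptoID())
}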
137 vendor/github.com/docker/docker/pkg/truncindex/truncindex.go generated vendored Normal file
@@ -0,0 +1,137 @@
|
|||||||
|
// Package truncindex provides a general 'index tree', used by Docker
|
||||||
|
// in order to be able to reference containers by only a few unambiguous
|
||||||
|
// characters of their id.
|
||||||
|
package truncindex
|
||||||
|
|
||||||
|
import (
|
||||||
|
"errors"
|
||||||
|
"fmt"
|
||||||
|
"strings"
|
||||||
|
"sync"
|
||||||
|
|
||||||
|
"github.com/tchap/go-patricia/patricia"
|
||||||
|
)
|
||||||
|
|
||||||
|
var (
|
||||||
|
// ErrEmptyPrefix is an error returned if the prefix was empty.
|
||||||
|
ErrEmptyPrefix = errors.New("Prefix can't be empty")
|
||||||
|
|
||||||
|
// ErrIllegalChar is returned when a space is in the ID
|
||||||
|
ErrIllegalChar = errors.New("illegal character: ' '")
|
||||||
|
|
||||||
|
// ErrNotExist is returned when ID or its prefix not found in index.
|
||||||
|
ErrNotExist = errors.New("ID does not exist")
|
||||||
|
)
|
||||||
|
|
||||||
|
// ErrAmbiguousPrefix is returned if the prefix was ambiguous
|
||||||
|
// (multiple ids for the prefix).
|
||||||
|
type ErrAmbiguousPrefix struct {
|
||||||
|
prefix string
|
||||||
|
}
|
||||||
|
|
||||||
|
func (e ErrAmbiguousPrefix) Error() string {
|
||||||
|
return fmt.Sprintf("Multiple IDs found with provided prefix: %s", e.prefix)
|
||||||
|
}
|
||||||
|
|
||||||
|
// TruncIndex allows the retrieval of string identifiers by any of their unique prefixes.
|
||||||
|
// This is used to retrieve image and container IDs by more convenient shorthand prefixes.
|
||||||
|
type TruncIndex struct {
|
||||||
|
sync.RWMutex
|
||||||
|
trie *patricia.Trie
|
||||||
|
ids map[string]struct{}
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewTruncIndex creates a new TruncIndex and initializes with a list of IDs.
|
||||||
|
func NewTruncIndex(ids []string) (idx *TruncIndex) {
|
||||||
|
idx = &TruncIndex{
|
||||||
|
ids: make(map[string]struct{}),
|
||||||
|
|
||||||
|
// Change patricia max prefix per node length,
|
||||||
|
// because our len(ID) is always 64
|
||||||
|
trie: patricia.NewTrie(patricia.MaxPrefixPerNode(64)),
|
||||||
|
}
|
||||||
|
for _, id := range ids {
|
||||||
|
idx.addID(id)
|
||||||
|
}
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
func (idx *TruncIndex) addID(id string) error {
|
||||||
|
if strings.Contains(id, " ") {
|
||||||
|
return ErrIllegalChar
|
||||||
|
}
|
||||||
|
if id == "" {
|
||||||
|
return ErrEmptyPrefix
|
||||||
|
}
|
||||||
|
if _, exists := idx.ids[id]; exists {
|
||||||
|
return fmt.Errorf("id already exists: '%s'", id)
|
||||||
|
}
|
||||||
|
idx.ids[id] = struct{}{}
|
||||||
|
if inserted := idx.trie.Insert(patricia.Prefix(id), struct{}{}); !inserted {
|
||||||
|
return fmt.Errorf("failed to insert id: %s", id)
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Add adds a new ID to the TruncIndex.
|
||||||
|
func (idx *TruncIndex) Add(id string) error {
|
||||||
|
idx.Lock()
|
||||||
|
defer idx.Unlock()
|
||||||
|
if err := idx.addID(id); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Delete removes an ID from the TruncIndex. If there are multiple IDs
|
||||||
|
// with the given prefix, an error is thrown.
|
||||||
|
func (idx *TruncIndex) Delete(id string) error {
|
||||||
|
idx.Lock()
|
||||||
|
defer idx.Unlock()
|
||||||
|
if _, exists := idx.ids[id]; !exists || id == "" {
|
||||||
|
return fmt.Errorf("no such id: '%s'", id)
|
||||||
|
}
|
||||||
|
delete(idx.ids, id)
|
||||||
|
if deleted := idx.trie.Delete(patricia.Prefix(id)); !deleted {
|
||||||
|
return fmt.Errorf("no such id: '%s'", id)
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Get retrieves an ID from the TruncIndex. If there are multiple IDs
|
||||||
|
// with the given prefix, an error is thrown.
|
||||||
|
func (idx *TruncIndex) Get(s string) (string, error) {
|
||||||
|
if s == "" {
|
||||||
|
return "", ErrEmptyPrefix
|
||||||
|
}
|
||||||
|
var (
|
||||||
|
id string
|
||||||
|
)
|
||||||
|
subTreeVisitFunc := func(prefix patricia.Prefix, item patricia.Item) error {
|
||||||
|
if id != "" {
|
||||||
|
// we haven't found the ID if there are two or more IDs
|
||||||
|
id = ""
|
||||||
|
return ErrAmbiguousPrefix{prefix: string(prefix)}
|
||||||
|
}
|
||||||
|
id = string(prefix)
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
idx.RLock()
|
||||||
|
defer idx.RUnlock()
|
||||||
|
if err := idx.trie.VisitSubtree(patricia.Prefix(s), subTreeVisitFunc); err != nil {
|
||||||
|
return "", err
|
||||||
|
}
|
||||||
|
if id != "" {
|
||||||
|
return id, nil
|
||||||
|
}
|
||||||
|
return "", ErrNotExist
|
||||||
|
}
|
||||||
|
|
||||||
|
// Iterate iterates over all stored IDs, and passes each of them to the given handler.
|
||||||
|
func (idx *TruncIndex) Iterate(handler func(id string)) {
|
||||||
|
idx.trie.Visit(func(prefix patricia.Prefix, item patricia.Item) error {
|
||||||
|
handler(string(prefix))
|
||||||
|
return nil
|
||||||
|
})
|
||||||
|
}
|
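TruncIndex stores full IDs in a patricia trie so that any unambiguous prefix resolves to the full ID, while an ambiguous one fails. A sketch with made-up IDs (real container IDs are 64-character hex strings):

package main

import (
	"fmt"

	"github.com/docker/docker/pkg/truncindex"
)

func main() {
	idx := truncindex.NewTruncIndex([]string{
		"deadbeef0001", // made-up IDs for illustration
		"deadbeef0002",
		"cafebabe0003",
	})

	full, err := idx.Get("cafe") // unique prefix resolves to the full ID
	fmt.Println(full, err)       // "cafebabe0003" <nil>

	_, err = idx.Get("deadbeef") // shared prefix -> ErrAmbiguousPrefix
	fmt.Println(err)

	if err := idx.Delete("deadbeef0001"); err == nil {
		full, _ = idx.Get("dead") // now unambiguous
		fmt.Println(full)         // "deadbeef0002"
	}
}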
1 vendor/github.com/docker/docker/project/CONTRIBUTORS.md generated vendored Symbolic link
@@ -0,0 +1 @@
../CONTRIBUTING.md
191 vendor/github.com/opencontainers/runtime-tools/LICENSE generated vendored Normal file
@@ -0,0 +1,191 @@
|
|||||||
|
|
||||||
|
Apache License
|
||||||
|
Version 2.0, January 2004
|
||||||
|
http://www.apache.org/licenses/
|
||||||
|
|
||||||
|
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
|
||||||
|
|
||||||
|
1. Definitions.
|
||||||
|
|
||||||
|
"License" shall mean the terms and conditions for use, reproduction,
|
||||||
|
and distribution as defined by Sections 1 through 9 of this document.
|
||||||
|
|
||||||
|
"Licensor" shall mean the copyright owner or entity authorized by
|
||||||
|
the copyright owner that is granting the License.
|
||||||
|
|
||||||
|
"Legal Entity" shall mean the union of the acting entity and all
|
||||||
|
other entities that control, are controlled by, or are under common
|
||||||
|
control with that entity. For the purposes of this definition,
|
||||||
|
"control" means (i) the power, direct or indirect, to cause the
|
||||||
|
direction or management of such entity, whether by contract or
|
||||||
|
otherwise, or (ii) ownership of fifty percent (50%) or more of the
|
||||||
|
outstanding shares, or (iii) beneficial ownership of such entity.
|
||||||
|
|
||||||
|
"You" (or "Your") shall mean an individual or Legal Entity
|
||||||
|
exercising permissions granted by this License.
|
||||||
|
|
||||||
|
"Source" form shall mean the preferred form for making modifications,
|
||||||
|
including but not limited to software source code, documentation
|
||||||
|
source, and configuration files.
|
||||||
|
|
||||||
|
"Object" form shall mean any form resulting from mechanical
|
||||||
|
transformation or translation of a Source form, including but
|
||||||
|
not limited to compiled object code, generated documentation,
|
||||||
|
and conversions to other media types.
|
||||||
|
|
||||||
|
"Work" shall mean the work of authorship, whether in Source or
|
||||||
|
Object form, made available under the License, as indicated by a
|
||||||
|
copyright notice that is included in or attached to the work
|
||||||
|
(an example is provided in the Appendix below).
|
||||||
|
|
||||||
|
"Derivative Works" shall mean any work, whether in Source or Object
|
||||||
|
form, that is based on (or derived from) the Work and for which the
|
||||||
|
editorial revisions, annotations, elaborations, or other modifications
|
||||||
|
represent, as a whole, an original work of authorship. For the purposes
|
||||||
|
of this License, Derivative Works shall not include works that remain
|
||||||
|
separable from, or merely link (or bind by name) to the interfaces of,
|
||||||
|
the Work and Derivative Works thereof.
|
||||||
|
|
||||||
|
"Contribution" shall mean any work of authorship, including
|
||||||
|
the original version of the Work and any modifications or additions
|
||||||
|
to that Work or Derivative Works thereof, that is intentionally
|
||||||
|
submitted to Licensor for inclusion in the Work by the copyright owner
|
||||||
|
or by an individual or Legal Entity authorized to submit on behalf of
|
||||||
|
the copyright owner. For the purposes of this definition, "submitted"
|
||||||
|
means any form of electronic, verbal, or written communication sent
|
||||||
|
to the Licensor or its representatives, including but not limited to
|
||||||
|
communication on electronic mailing lists, source code control systems,
|
||||||
|
and issue tracking systems that are managed by, or on behalf of, the
|
||||||
|
Licensor for the purpose of discussing and improving the Work, but
|
||||||
|
excluding communication that is conspicuously marked or otherwise
|
||||||
|
designated in writing by the copyright owner as "Not a Contribution."
|
||||||
|
|
||||||
|
"Contributor" shall mean Licensor and any individual or Legal Entity
|
||||||
|
on behalf of whom a Contribution has been received by Licensor and
|
||||||
|
subsequently incorporated within the Work.
|
||||||
|
|
||||||
|
2. Grant of Copyright License. Subject to the terms and conditions of
|
||||||
|
this License, each Contributor hereby grants to You a perpetual,
|
||||||
|
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||||
|
copyright license to reproduce, prepare Derivative Works of,
|
||||||
|
publicly display, publicly perform, sublicense, and distribute the
|
||||||
|
Work and such Derivative Works in Source or Object form.
|
||||||
|
|
||||||
|
3. Grant of Patent License. Subject to the terms and conditions of
|
||||||
|
this License, each Contributor hereby grants to You a perpetual,
|
||||||
|
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||||
|
(except as stated in this section) patent license to make, have made,
|
||||||
|
use, offer to sell, sell, import, and otherwise transfer the Work,
|
||||||
|
where such license applies only to those patent claims licensable
|
||||||
|
by such Contributor that are necessarily infringed by their
|
||||||
|
Contribution(s) alone or by combination of their Contribution(s)
|
||||||
|
with the Work to which such Contribution(s) was submitted. If You
|
||||||
|
institute patent litigation against any entity (including a
|
||||||
|
cross-claim or counterclaim in a lawsuit) alleging that the Work
|
||||||
|
or a Contribution incorporated within the Work constitutes direct
|
||||||
|
or contributory patent infringement, then any patent licenses
|
||||||
|
granted to You under this License for that Work shall terminate
|
||||||
|
as of the date such litigation is filed.
|
||||||
|
|
||||||
|
4. Redistribution. You may reproduce and distribute copies of the
|
||||||
|
Work or Derivative Works thereof in any medium, with or without
|
||||||
|
modifications, and in Source or Object form, provided that You
|
||||||
|
meet the following conditions:
|
||||||
|
|
||||||
|
(a) You must give any other recipients of the Work or
|
||||||
|
Derivative Works a copy of this License; and
|
||||||
|
|
||||||
|
(b) You must cause any modified files to carry prominent notices
|
||||||
|
stating that You changed the files; and
|
||||||
|
|
||||||
|
(c) You must retain, in the Source form of any Derivative Works
|
||||||
|
that You distribute, all copyright, patent, trademark, and
|
||||||
|
attribution notices from the Source form of the Work,
|
||||||
|
excluding those notices that do not pertain to any part of
|
||||||
|
the Derivative Works; and
|
||||||
|
|
||||||
|
(d) If the Work includes a "NOTICE" text file as part of its
|
||||||
|
distribution, then any Derivative Works that You distribute must
|
||||||
|
include a readable copy of the attribution notices contained
|
||||||
|
within such NOTICE file, excluding those notices that do not
|
||||||
|
pertain to any part of the Derivative Works, in at least one
|
||||||
|
of the following places: within a NOTICE text file distributed
|
||||||
|
as part of the Derivative Works; within the Source form or
|
||||||
|
documentation, if provided along with the Derivative Works; or,
|
||||||
|
within a display generated by the Derivative Works, if and
|
||||||
|
wherever such third-party notices normally appear. The contents
|
||||||
|
of the NOTICE file are for informational purposes only and
|
||||||
|
do not modify the License. You may add Your own attribution
|
||||||
|
notices within Derivative Works that You distribute, alongside
|
||||||
|
or as an addendum to the NOTICE text from the Work, provided
|
||||||
|
that such additional attribution notices cannot be construed
|
||||||
|
as modifying the License.
|
||||||
|
|
||||||
|
You may add Your own copyright statement to Your modifications and
|
||||||
|
may provide additional or different license terms and conditions
|
||||||
|
for use, reproduction, or distribution of Your modifications, or
|
||||||
|
for any such Derivative Works as a whole, provided Your use,
|
||||||
|
reproduction, and distribution of the Work otherwise complies with
|
||||||
|
the conditions stated in this License.
|
||||||
|
|
||||||
|
5. Submission of Contributions. Unless You explicitly state otherwise,
|
||||||
|
any Contribution intentionally submitted for inclusion in the Work
|
||||||
|
by You to the Licensor shall be under the terms and conditions of
|
||||||
|
this License, without any additional terms or conditions.
|
||||||
|
Notwithstanding the above, nothing herein shall supersede or modify
|
||||||
|
the terms of any separate license agreement you may have executed
|
||||||
|
with Licensor regarding such Contributions.
|
||||||
|
|
||||||
|
6. Trademarks. This License does not grant permission to use the trade
|
||||||
|
names, trademarks, service marks, or product names of the Licensor,
|
||||||
|
except as required for reasonable and customary use in describing the
|
||||||
|
origin of the Work and reproducing the content of the NOTICE file.
|
||||||
|
|
||||||
|
7. Disclaimer of Warranty. Unless required by applicable law or
|
||||||
|
agreed to in writing, Licensor provides the Work (and each
|
||||||
|
Contributor provides its Contributions) on an "AS IS" BASIS,
|
||||||
|
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
||||||
|
implied, including, without limitation, any warranties or conditions
|
||||||
|
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
|
||||||
|
PARTICULAR PURPOSE. You are solely responsible for determining the
|
||||||
|
appropriateness of using or redistributing the Work and assume any
|
||||||
|
risks associated with Your exercise of permissions under this License.
|
||||||
|
|
||||||
|
8. Limitation of Liability. In no event and under no legal theory,
|
||||||
|
whether in tort (including negligence), contract, or otherwise,
|
||||||
|
unless required by applicable law (such as deliberate and grossly
|
||||||
|
negligent acts) or agreed to in writing, shall any Contributor be
|
||||||
|
liable to You for damages, including any direct, indirect, special,
|
||||||
|
incidental, or consequential damages of any character arising as a
|
||||||
|
result of this License or out of the use or inability to use the
|
||||||
|
Work (including but not limited to damages for loss of goodwill,
|
||||||
|
work stoppage, computer failure or malfunction, or any and all
|
||||||
|
other commercial damages or losses), even if such Contributor
|
||||||
|
has been advised of the possibility of such damages.
|
||||||
|
|
||||||
|
9. Accepting Warranty or Additional Liability. While redistributing
|
||||||
|
the Work or Derivative Works thereof, You may choose to offer,
|
||||||
|
and charge a fee for, acceptance of support, warranty, indemnity,
|
||||||
|
or other liability obligations and/or rights consistent with this
|
||||||
|
License. However, in accepting such obligations, You may act only
|
||||||
|
on Your own behalf and on Your sole responsibility, not on behalf
|
||||||
|
of any other Contributor, and only if You agree to indemnify,
|
||||||
|
defend, and hold each Contributor harmless for any liability
|
||||||
|
incurred by, or claims asserted against, such Contributor by reason
|
||||||
|
of your accepting any such warranty or additional liability.
|
||||||
|
|
||||||
|
END OF TERMS AND CONDITIONS
|
||||||
|
|
||||||
|
Copyright 2015 The Linux Foundation.
|
||||||
|
|
||||||
|
Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
you may not use this file except in compliance with the License.
|
||||||
|
You may obtain a copy of the License at
|
||||||
|
|
||||||
|
http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
|
||||||
|
Unless required by applicable law or agreed to in writing, software
|
||||||
|
distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
See the License for the specific language governing permissions and
|
||||||
|
limitations under the License.
|
1107 vendor/github.com/opencontainers/runtime-tools/generate/generate.go generated vendored Normal file
File diff suppressed because it is too large
12 vendor/github.com/opencontainers/runtime-tools/generate/seccomp/consts.go generated vendored Normal file
@@ -0,0 +1,12 @@
package seccomp

const (
	seccompOverwrite = "overwrite"
	seccompAppend    = "append"
	nothing          = "nothing"
	kill             = "kill"
	trap             = "trap"
	trace            = "trace"
	allow            = "allow"
	errno            = "errno"
)
127 vendor/github.com/opencontainers/runtime-tools/generate/seccomp/parse_action.go generated vendored Normal file
@@ -0,0 +1,127 @@
|
|||||||
|
package seccomp
|
||||||
|
|
||||||
|
import (
|
||||||
|
"fmt"
|
||||||
|
"strconv"
|
||||||
|
"strings"
|
||||||
|
|
||||||
|
rspec "github.com/opencontainers/runtime-spec/specs-go"
|
||||||
|
)
|
||||||
|
|
||||||
|
// SyscallOpts contain options for parsing syscall rules
|
||||||
|
type SyscallOpts struct {
|
||||||
|
Action string
|
||||||
|
Syscall string
|
||||||
|
Index string
|
||||||
|
Value string
|
||||||
|
ValueTwo string
|
||||||
|
Operator string
|
||||||
|
}
|
||||||
|
|
||||||
|
// ParseSyscallFlag takes a SyscallOpts struct and the seccomp configuration
|
||||||
|
// and sets the new syscall rule accordingly
|
||||||
|
func ParseSyscallFlag(args SyscallOpts, config *rspec.LinuxSeccomp) error {
|
||||||
|
var arguments []string
|
||||||
|
if args.Index != "" && args.Value != "" && args.ValueTwo != "" && args.Operator != "" {
|
||||||
|
arguments = []string{args.Action, args.Syscall, args.Index, args.Value,
|
||||||
|
args.ValueTwo, args.Operator}
|
||||||
|
} else {
|
||||||
|
arguments = []string{args.Action, args.Syscall}
|
||||||
|
}
|
||||||
|
|
||||||
|
action, _ := parseAction(arguments[0])
|
||||||
|
if action == config.DefaultAction {
|
||||||
|
return fmt.Errorf("default action already set as %s", action)
|
||||||
|
}
|
||||||
|
|
||||||
|
var newSyscall rspec.LinuxSyscall
|
||||||
|
numOfArgs := len(arguments)
|
||||||
|
if numOfArgs == 6 || numOfArgs == 2 {
|
||||||
|
argStruct, err := parseArguments(arguments[1:])
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
newSyscall = newSyscallStruct(arguments[1], action, argStruct)
|
||||||
|
} else {
|
||||||
|
return fmt.Errorf("incorrect number of arguments to ParseSyscall: %d", numOfArgs)
|
||||||
|
}
|
||||||
|
|
||||||
|
descison, err := decideCourseOfAction(&newSyscall, config.Syscalls)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
delimDescison := strings.Split(descison, ":")
|
||||||
|
|
||||||
|
if delimDescison[0] == seccompAppend {
|
||||||
|
config.Syscalls = append(config.Syscalls, newSyscall)
|
||||||
|
}
|
||||||
|
|
||||||
|
if delimDescison[0] == seccompOverwrite {
|
||||||
|
indexForOverwrite, err := strconv.ParseInt(delimDescison[1], 10, 32)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
config.Syscalls[indexForOverwrite] = newSyscall
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
var actions = map[string]rspec.LinuxSeccompAction{
|
||||||
|
"allow": rspec.ActAllow,
|
||||||
|
"errno": rspec.ActErrno,
|
||||||
|
"kill": rspec.ActKill,
|
||||||
|
"trace": rspec.ActTrace,
|
||||||
|
"trap": rspec.ActTrap,
|
||||||
|
}
|
||||||
|
|
||||||
|
// Take passed action, return the SCMP_ACT_<ACTION> version of it
|
||||||
|
func parseAction(action string) (rspec.LinuxSeccompAction, error) {
|
||||||
|
a, ok := actions[action]
|
||||||
|
if !ok {
|
||||||
|
return "", fmt.Errorf("unrecognized action: %s", action)
|
||||||
|
}
|
||||||
|
return a, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// ParseDefaultAction sets the default action of the seccomp configuration
|
||||||
|
// and then removes any rules that were already specified with this action
|
||||||
|
func ParseDefaultAction(action string, config *rspec.LinuxSeccomp) error {
|
||||||
|
if action == "" {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
defaultAction, err := parseAction(action)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
config.DefaultAction = defaultAction
|
||||||
|
err = RemoveAllMatchingRules(config, action)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// ParseDefaultActionForce simply sets the default action of the seccomp configuration
|
||||||
|
func ParseDefaultActionForce(action string, config *rspec.LinuxSeccomp) error {
|
||||||
|
if action == "" {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
defaultAction, err := parseAction(action)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
config.DefaultAction = defaultAction
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func newSyscallStruct(name string, action rspec.LinuxSeccompAction, args []rspec.LinuxSeccompArg) rspec.LinuxSyscall {
|
||||||
|
syscallStruct := rspec.LinuxSyscall{
|
||||||
|
Names: []string{name},
|
||||||
|
Action: action,
|
||||||
|
Args: args,
|
||||||
|
}
|
||||||
|
return syscallStruct
|
||||||
|
}
|
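ParseDefaultAction and ParseSyscallFlag are the entry points used to build up a seccomp config from flag-style input. A minimal sketch against an empty config (the action and syscall names are illustrative):

package main

import (
	"fmt"

	rspec "github.com/opencontainers/runtime-spec/specs-go"
	"github.com/opencontainers/runtime-tools/generate/seccomp"
)

func main() {
	var config rspec.LinuxSeccomp

	// Default-deny: every syscall not matched by a rule returns an errno.
	if err := seccomp.ParseDefaultAction("errno", &config); err != nil {
		panic(err)
	}

	// Whitelist a single syscall with no argument filtering.
	err := seccomp.ParseSyscallFlag(seccomp.SyscallOpts{
		Action:  "allow",
		Syscall: "chmod",
	}, &config)
	if err != nil {
		panic(err)
	}

	fmt.Println(config.DefaultAction, len(config.Syscalls)) // SCMP_ACT_ERRNO 1
}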
55 vendor/github.com/opencontainers/runtime-tools/generate/seccomp/parse_architecture.go generated vendored Normal file
@@ -0,0 +1,55 @@
|
|||||||
|
package seccomp
|
||||||
|
|
||||||
|
import (
|
||||||
|
"fmt"
|
||||||
|
|
||||||
|
rspec "github.com/opencontainers/runtime-spec/specs-go"
|
||||||
|
)
|
||||||
|
|
||||||
|
// ParseArchitectureFlag takes the raw string passed with the --arch flag, parses it
|
||||||
|
// and updates the Seccomp config accordingly
|
||||||
|
func ParseArchitectureFlag(architectureArg string, config *rspec.LinuxSeccomp) error {
|
||||||
|
correctedArch, err := parseArch(architectureArg)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
shouldAppend := true
|
||||||
|
for _, alreadySpecified := range config.Architectures {
|
||||||
|
if correctedArch == alreadySpecified {
|
||||||
|
shouldAppend = false
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if shouldAppend {
|
||||||
|
config.Architectures = append(config.Architectures, correctedArch)
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func parseArch(arch string) (rspec.Arch, error) {
|
||||||
|
arches := map[string]rspec.Arch{
|
||||||
|
"x86": rspec.ArchX86,
|
||||||
|
"amd64": rspec.ArchX86_64,
|
||||||
|
"x32": rspec.ArchX32,
|
||||||
|
"arm": rspec.ArchARM,
|
||||||
|
"arm64": rspec.ArchAARCH64,
|
||||||
|
"mips": rspec.ArchMIPS,
|
||||||
|
"mips64": rspec.ArchMIPS64,
|
||||||
|
"mips64n32": rspec.ArchMIPS64N32,
|
||||||
|
"mipsel": rspec.ArchMIPSEL,
|
||||||
|
"mipsel64": rspec.ArchMIPSEL64,
|
||||||
|
"mipsel64n32": rspec.ArchMIPSEL64N32,
|
||||||
|
"parisc": rspec.ArchPARISC,
|
||||||
|
"parisc64": rspec.ArchPARISC64,
|
||||||
|
"ppc": rspec.ArchPPC,
|
||||||
|
"ppc64": rspec.ArchPPC64,
|
||||||
|
"ppc64le": rspec.ArchPPC64LE,
|
||||||
|
"s390": rspec.ArchS390,
|
||||||
|
"s390x": rspec.ArchS390X,
|
||||||
|
}
|
||||||
|
a, ok := arches[arch]
|
||||||
|
if !ok {
|
||||||
|
return "", fmt.Errorf("unrecognized architecture: %s", arch)
|
||||||
|
}
|
||||||
|
return a, nil
|
||||||
|
}
|
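ParseArchitectureFlag appends an architecture only if it is not already present, so repeated flags are harmless. A short sketch:

package main

import (
	"fmt"

	rspec "github.com/opencontainers/runtime-spec/specs-go"
	"github.com/opencontainers/runtime-tools/generate/seccomp"
)

func main() {
	var config rspec.LinuxSeccomp

	for _, arch := range []string{"amd64", "x86", "amd64"} { // duplicate on purpose
		if err := seccomp.ParseArchitectureFlag(arch, &config); err != nil {
			panic(err)
		}
	}

	fmt.Println(config.Architectures) // [SCMP_ARCH_X86_64 SCMP_ARCH_X86]
}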
73 vendor/github.com/opencontainers/runtime-tools/generate/seccomp/parse_arguments.go generated vendored Normal file
@@ -0,0 +1,73 @@
|
|||||||
|
package seccomp
|
||||||
|
|
||||||
|
import (
|
||||||
|
"fmt"
|
||||||
|
"strconv"
|
||||||
|
|
||||||
|
rspec "github.com/opencontainers/runtime-spec/specs-go"
|
||||||
|
)
|
||||||
|
|
||||||
|
// parseArguments takes a list of arguments (delimArgs). It parses and fills out
|
||||||
|
// the argument information and returns a slice of arg structs
|
||||||
|
func parseArguments(delimArgs []string) ([]rspec.LinuxSeccompArg, error) {
|
||||||
|
nilArgSlice := []rspec.LinuxSeccompArg{}
|
||||||
|
numberOfArgs := len(delimArgs)
|
||||||
|
|
||||||
|
// No parameters passed with syscall
|
||||||
|
if numberOfArgs == 1 {
|
||||||
|
return nilArgSlice, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Correct number of parameters passed with syscall
|
||||||
|
if numberOfArgs == 5 {
|
||||||
|
syscallIndex, err := strconv.ParseUint(delimArgs[1], 10, 0)
|
||||||
|
if err != nil {
|
||||||
|
return nilArgSlice, err
|
||||||
|
}
|
||||||
|
|
||||||
|
syscallValue, err := strconv.ParseUint(delimArgs[2], 10, 64)
|
||||||
|
if err != nil {
|
||||||
|
return nilArgSlice, err
|
||||||
|
}
|
||||||
|
|
||||||
|
syscallValueTwo, err := strconv.ParseUint(delimArgs[3], 10, 64)
|
||||||
|
if err != nil {
|
||||||
|
return nilArgSlice, err
|
||||||
|
}
|
||||||
|
|
||||||
|
syscallOp, err := parseOperator(delimArgs[4])
|
||||||
|
if err != nil {
|
||||||
|
return nilArgSlice, err
|
||||||
|
}
|
||||||
|
|
||||||
|
argStruct := rspec.LinuxSeccompArg{
|
||||||
|
Index: uint(syscallIndex),
|
||||||
|
Value: syscallValue,
|
||||||
|
ValueTwo: syscallValueTwo,
|
||||||
|
Op: syscallOp,
|
||||||
|
}
|
||||||
|
|
||||||
|
argSlice := []rspec.LinuxSeccompArg{}
|
||||||
|
argSlice = append(argSlice, argStruct)
|
||||||
|
return argSlice, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
return nilArgSlice, fmt.Errorf("incorrect number of arguments passed with syscall: %d", numberOfArgs)
|
||||||
|
}
|
||||||
|
|
||||||
|
func parseOperator(operator string) (rspec.LinuxSeccompOperator, error) {
|
||||||
|
operators := map[string]rspec.LinuxSeccompOperator{
|
||||||
|
"NE": rspec.OpNotEqual,
|
||||||
|
"LT": rspec.OpLessThan,
|
||||||
|
"LE": rspec.OpLessEqual,
|
||||||
|
"EQ": rspec.OpEqualTo,
|
||||||
|
"GE": rspec.OpGreaterEqual,
|
||||||
|
"GT": rspec.OpGreaterThan,
|
||||||
|
"ME": rspec.OpMaskedEqual,
|
||||||
|
}
|
||||||
|
o, ok := operators[operator]
|
||||||
|
if !ok {
|
||||||
|
return "", fmt.Errorf("unrecognized operator: %s", operator)
|
||||||
|
}
|
||||||
|
return o, nil
|
||||||
|
}
|
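parseArguments is unexported, but it is exercised whenever Index, Value, ValueTwo, and Operator are all set on SyscallOpts; the string fields are parsed into a single LinuxSeccompArg. A hedged sketch (the numeric values are arbitrary examples):

package main

import (
	"fmt"

	rspec "github.com/opencontainers/runtime-spec/specs-go"
	"github.com/opencontainers/runtime-tools/generate/seccomp"
)

func main() {
	var config rspec.LinuxSeccomp

	// Argument 0 of personality must equal 8 for this rule to match.
	err := seccomp.ParseSyscallFlag(seccomp.SyscallOpts{
		Action:   "errno",
		Syscall:  "personality",
		Index:    "0",
		Value:    "8",
		ValueTwo: "0",
		Operator: "EQ",
	}, &config)
	if err != nil {
		panic(err)
	}

	fmt.Printf("%+v\n", config.Syscalls[0].Args[0]) // {Index:0 Value:8 ValueTwo:0 Op:SCMP_CMP_EQ}
}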
62 vendor/github.com/opencontainers/runtime-tools/generate/seccomp/parse_remove.go generated vendored Normal file
@@ -0,0 +1,62 @@
|
|||||||
|
package seccomp
|
||||||
|
|
||||||
|
import (
|
||||||
|
"fmt"
|
||||||
|
"reflect"
|
||||||
|
"strings"
|
||||||
|
|
||||||
|
rspec "github.com/opencontainers/runtime-spec/specs-go"
|
||||||
|
)
|
||||||
|
|
||||||
|
// RemoveAction takes the argument string that was passed with the --remove flag,
|
||||||
|
// parses it, and updates the Seccomp config accordingly
|
||||||
|
func RemoveAction(arguments string, config *rspec.LinuxSeccomp) error {
|
||||||
|
if config == nil {
|
||||||
|
return fmt.Errorf("Cannot remove action from nil Seccomp pointer")
|
||||||
|
}
|
||||||
|
|
||||||
|
var syscallsToRemove []string
|
||||||
|
if strings.Contains(arguments, ",") {
|
||||||
|
syscallsToRemove = strings.Split(arguments, ",")
|
||||||
|
} else {
|
||||||
|
syscallsToRemove = append(syscallsToRemove, arguments)
|
||||||
|
}
|
||||||
|
|
||||||
|
for counter, syscallStruct := range config.Syscalls {
|
||||||
|
if reflect.DeepEqual(syscallsToRemove, syscallStruct.Names) {
|
||||||
|
config.Syscalls = append(config.Syscalls[:counter], config.Syscalls[counter+1:]...)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// RemoveAllSeccompRules removes all seccomp syscall rules
|
||||||
|
func RemoveAllSeccompRules(config *rspec.LinuxSeccomp) error {
|
||||||
|
if config == nil {
|
||||||
|
return fmt.Errorf("Cannot remove action from nil Seccomp pointer")
|
||||||
|
}
|
||||||
|
newSyscallSlice := []rspec.LinuxSyscall{}
|
||||||
|
config.Syscalls = newSyscallSlice
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// RemoveAllMatchingRules will remove any syscall rules that match the specified action
|
||||||
|
func RemoveAllMatchingRules(config *rspec.LinuxSeccomp, action string) error {
|
||||||
|
if config == nil {
|
||||||
|
return fmt.Errorf("Cannot remove action from nil Seccomp pointer")
|
||||||
|
}
|
||||||
|
|
||||||
|
seccompAction, err := parseAction(action)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, syscall := range config.Syscalls {
|
||||||
|
if reflect.DeepEqual(syscall.Action, seccompAction) {
|
||||||
|
RemoveAction(strings.Join(syscall.Names, ","), config)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
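The remove helpers operate on whole rules: RemoveAction matches by the exact Names slice, RemoveAllMatchingRules by action, and RemoveAllSeccompRules clears everything. A sketch against a hand-built config (the rule contents are illustrative):

package main

import (
	"fmt"

	rspec "github.com/opencontainers/runtime-spec/specs-go"
	"github.com/opencontainers/runtime-tools/generate/seccomp"
)

func main() {
	config := rspec.LinuxSeccomp{
		DefaultAction: rspec.ActErrno,
		Syscalls: []rspec.LinuxSyscall{
			{Names: []string{"chmod"}, Action: rspec.ActAllow},
			{Names: []string{"chown"}, Action: rspec.ActAllow},
			{Names: []string{"reboot"}, Action: rspec.ActKill},
		},
	}

	// Drop the rule whose Names slice is exactly ["chmod"].
	if err := seccomp.RemoveAction("chmod", &config); err != nil {
		panic(err)
	}

	// Drop every remaining rule whose action is "allow".
	if err := seccomp.RemoveAllMatchingRules(&config, "allow"); err != nil {
		panic(err)
	}

	fmt.Println(len(config.Syscalls)) // 1 (only the "reboot"/kill rule is left)

	// Start over from a clean slate.
	_ = seccomp.RemoveAllSeccompRules(&config)
}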
578 vendor/github.com/opencontainers/runtime-tools/generate/seccomp/seccomp_default.go generated vendored Normal file
@@ -0,0 +1,578 @@
|
|||||||
|
package seccomp
|
||||||
|
|
||||||
|
import (
|
||||||
|
"runtime"
|
||||||
|
"syscall"
|
||||||
|
|
||||||
|
"github.com/opencontainers/runtime-spec/specs-go"
|
||||||
|
rspec "github.com/opencontainers/runtime-spec/specs-go"
|
||||||
|
)
|
||||||
|
|
||||||
|
func arches() []rspec.Arch {
|
||||||
|
native := runtime.GOARCH
|
||||||
|
|
||||||
|
switch native {
|
||||||
|
case "amd64":
|
||||||
|
return []rspec.Arch{rspec.ArchX86_64, rspec.ArchX86, rspec.ArchX32}
|
||||||
|
case "arm64":
|
||||||
|
return []rspec.Arch{rspec.ArchARM, rspec.ArchAARCH64}
|
||||||
|
case "mips64":
|
||||||
|
return []rspec.Arch{rspec.ArchMIPS, rspec.ArchMIPS64, rspec.ArchMIPS64N32}
|
||||||
|
case "mips64n32":
|
||||||
|
return []rspec.Arch{rspec.ArchMIPS, rspec.ArchMIPS64, rspec.ArchMIPS64N32}
|
||||||
|
case "mipsel64":
|
||||||
|
return []rspec.Arch{rspec.ArchMIPSEL, rspec.ArchMIPSEL64, rspec.ArchMIPSEL64N32}
|
||||||
|
case "mipsel64n32":
|
||||||
|
return []rspec.Arch{rspec.ArchMIPSEL, rspec.ArchMIPSEL64, rspec.ArchMIPSEL64N32}
|
||||||
|
case "s390x":
|
||||||
|
return []rspec.Arch{rspec.ArchS390, rspec.ArchS390X}
|
||||||
|
default:
|
||||||
|
return []rspec.Arch{}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// DefaultProfile defines the whitelist for the default seccomp profile.
|
||||||
|
func DefaultProfile(rs *specs.Spec) *rspec.LinuxSeccomp {
|
||||||
|
|
||||||
|
syscalls := []rspec.LinuxSyscall{
|
||||||
|
{
|
||||||
|
Names: []string{
|
||||||
|
"accept",
|
||||||
|
"accept4",
|
||||||
|
"access",
|
||||||
|
"alarm",
|
||||||
|
"bind",
|
||||||
|
"brk",
|
||||||
|
"capget",
|
||||||
|
"capset",
|
||||||
|
"chdir",
|
||||||
|
"chmod",
|
||||||
|
"chown",
|
||||||
|
"chown32",
|
||||||
|
"clock_getres",
|
||||||
|
"clock_gettime",
|
||||||
|
"clock_nanosleep",
|
||||||
|
"close",
|
||||||
|
"connect",
|
||||||
|
"copy_file_range",
|
||||||
|
"creat",
|
||||||
|
"dup",
|
||||||
|
"dup2",
|
||||||
|
"dup3",
|
||||||
|
"epoll_create",
|
||||||
|
"epoll_create1",
|
||||||
|
"epoll_ctl",
|
||||||
|
"epoll_ctl_old",
|
||||||
|
"epoll_pwait",
|
||||||
|
"epoll_wait",
|
||||||
|
"epoll_wait_old",
|
||||||
|
"eventfd",
|
||||||
|
"eventfd2",
|
||||||
|
"execve",
|
||||||
|
"execveat",
|
||||||
|
"exit",
|
||||||
|
"exit_group",
|
||||||
|
"faccessat",
|
||||||
|
"fadvise64",
|
||||||
|
"fadvise64_64",
|
||||||
|
"fallocate",
|
||||||
|
"fanotify_mark",
|
||||||
|
"fchdir",
|
||||||
|
"fchmod",
|
||||||
|
"fchmodat",
|
||||||
|
"fchown",
|
||||||
|
"fchown32",
|
||||||
|
"fchownat",
|
||||||
|
"fcntl",
|
||||||
|
"fcntl64",
|
||||||
|
"fdatasync",
|
||||||
|
"fgetxattr",
|
||||||
|
"flistxattr",
|
||||||
|
"flock",
|
||||||
|
"fork",
|
||||||
|
"fremovexattr",
|
||||||
|
"fsetxattr",
|
||||||
|
"fstat",
|
||||||
|
"fstat64",
|
||||||
|
"fstatat64",
|
||||||
|
"fstatfs",
|
||||||
|
"fstatfs64",
|
||||||
|
"fsync",
|
||||||
|
"ftruncate",
|
||||||
|
"ftruncate64",
|
||||||
|
"futex",
|
||||||
|
"futimesat",
|
||||||
|
"getcpu",
|
||||||
|
"getcwd",
|
||||||
|
"getdents",
|
||||||
|
"getdents64",
|
||||||
|
"getegid",
|
||||||
|
"getegid32",
|
||||||
|
"geteuid",
|
||||||
|
"geteuid32",
|
||||||
|
"getgid",
|
||||||
|
"getgid32",
|
||||||
|
"getgroups",
|
||||||
|
"getgroups32",
|
||||||
|
"getitimer",
|
||||||
|
"getpeername",
|
||||||
|
"getpgid",
|
||||||
|
"getpgrp",
|
||||||
|
"getpid",
|
||||||
|
"getppid",
|
||||||
|
"getpriority",
|
||||||
|
"getrandom",
|
||||||
|
"getresgid",
|
||||||
|
"getresgid32",
|
||||||
|
"getresuid",
|
||||||
|
"getresuid32",
|
||||||
|
"getrlimit",
|
||||||
|
"get_robust_list",
|
||||||
|
"getrusage",
|
||||||
|
"getsid",
|
||||||
|
"getsockname",
|
||||||
|
"getsockopt",
|
||||||
|
"get_thread_area",
|
||||||
|
"gettid",
|
||||||
|
"gettimeofday",
|
||||||
|
"getuid",
|
||||||
|
"getuid32",
|
||||||
|
"getxattr",
|
||||||
|
"inotify_add_watch",
|
||||||
|
"inotify_init",
|
||||||
|
"inotify_init1",
|
||||||
|
"inotify_rm_watch",
|
||||||
|
"io_cancel",
|
||||||
|
"ioctl",
|
||||||
|
"io_destroy",
|
||||||
|
"io_getevents",
|
||||||
|
"ioprio_get",
|
||||||
|
"ioprio_set",
|
||||||
|
"io_setup",
|
||||||
|
"io_submit",
|
||||||
|
"ipc",
|
||||||
|
"kill",
|
||||||
|
"lchown",
|
||||||
|
"lchown32",
|
||||||
|
"lgetxattr",
|
||||||
|
"link",
|
||||||
|
"linkat",
|
||||||
|
"listen",
|
||||||
|
"listxattr",
|
||||||
|
"llistxattr",
|
||||||
|
"_llseek",
|
||||||
|
"lremovexattr",
|
||||||
|
"lseek",
|
||||||
|
"lsetxattr",
|
||||||
|
"lstat",
|
||||||
|
"lstat64",
|
||||||
|
"madvise",
|
||||||
|
"memfd_create",
|
||||||
|
"mincore",
|
||||||
|
"mkdir",
|
||||||
|
"mkdirat",
|
||||||
|
"mknod",
|
||||||
|
"mknodat",
|
||||||
|
"mlock",
|
||||||
|
"mlock2",
|
||||||
|
"mlockall",
|
||||||
|
"mmap",
|
||||||
|
"mmap2",
|
||||||
|
"mprotect",
|
||||||
|
"mq_getsetattr",
|
||||||
|
"mq_notify",
|
||||||
|
"mq_open",
|
||||||
|
"mq_timedreceive",
|
||||||
|
"mq_timedsend",
|
||||||
|
"mq_unlink",
|
||||||
|
"mremap",
|
||||||
|
"msgctl",
|
||||||
|
"msgget",
|
||||||
|
"msgrcv",
|
||||||
|
"msgsnd",
|
||||||
|
"msync",
|
||||||
|
"munlock",
|
||||||
|
"munlockall",
|
||||||
|
"munmap",
|
||||||
|
"nanosleep",
|
||||||
|
"newfstatat",
|
||||||
|
"_newselect",
|
||||||
|
"open",
|
||||||
|
"openat",
|
||||||
|
"pause",
|
||||||
|
"pipe",
|
||||||
|
"pipe2",
|
||||||
|
"poll",
|
||||||
|
"ppoll",
|
||||||
|
"prctl",
|
||||||
|
"pread64",
|
||||||
|
"preadv",
|
||||||
|
"prlimit64",
|
||||||
|
"pselect6",
|
||||||
|
"pwrite64",
|
||||||
|
"pwritev",
|
||||||
|
"read",
|
||||||
|
"readahead",
|
||||||
|
"readlink",
|
||||||
|
"readlinkat",
|
||||||
|
"readv",
|
||||||
|
"recv",
|
||||||
|
"recvfrom",
|
||||||
|
"recvmmsg",
|
||||||
|
"recvmsg",
|
||||||
|
"remap_file_pages",
|
||||||
|
"removexattr",
|
||||||
|
"rename",
|
||||||
|
"renameat",
|
||||||
|
"renameat2",
|
||||||
|
"restart_syscall",
|
||||||
|
"rmdir",
|
||||||
|
"rt_sigaction",
|
||||||
|
"rt_sigpending",
|
||||||
|
"rt_sigprocmask",
|
||||||
|
"rt_sigqueueinfo",
|
||||||
|
"rt_sigreturn",
|
||||||
|
"rt_sigsuspend",
|
||||||
|
"rt_sigtimedwait",
|
||||||
|
"rt_tgsigqueueinfo",
|
||||||
|
"sched_getaffinity",
|
||||||
|
"sched_getattr",
|
||||||
|
"sched_getparam",
|
||||||
|
"sched_get_priority_max",
|
||||||
|
"sched_get_priority_min",
|
||||||
|
"sched_getscheduler",
|
||||||
|
"sched_rr_get_interval",
|
||||||
|
"sched_setaffinity",
|
||||||
|
"sched_setattr",
|
||||||
|
"sched_setparam",
|
||||||
|
"sched_setscheduler",
|
||||||
|
"sched_yield",
|
||||||
|
"seccomp",
|
||||||
|
"select",
|
||||||
|
"semctl",
|
||||||
|
"semget",
|
||||||
|
"semop",
|
||||||
|
"semtimedop",
|
||||||
|
"send",
|
||||||
|
"sendfile",
|
||||||
|
"sendfile64",
|
||||||
|
"sendmmsg",
|
||||||
|
"sendmsg",
|
||||||
|
"sendto",
|
||||||
|
"setfsgid",
|
||||||
|
"setfsgid32",
|
||||||
|
"setfsuid",
|
||||||
|
"setfsuid32",
|
||||||
|
"setgid",
|
||||||
|
"setgid32",
|
||||||
|
"setgroups",
|
||||||
|
"setgroups32",
|
||||||
|
"setitimer",
|
||||||
|
"setpgid",
|
||||||
|
"setpriority",
|
||||||
|
"setregid",
|
||||||
|
"setregid32",
|
||||||
|
"setresgid",
|
||||||
|
"setresgid32",
|
||||||
|
"setresuid",
|
||||||
|
"setresuid32",
|
||||||
|
"setreuid",
|
||||||
|
"setreuid32",
|
||||||
|
"setrlimit",
|
||||||
|
"set_robust_list",
|
||||||
|
"setsid",
|
||||||
|
"setsockopt",
|
||||||
|
"set_thread_area",
|
||||||
|
"set_tid_address",
|
||||||
|
"setuid",
|
||||||
|
"setuid32",
|
||||||
|
"setxattr",
|
||||||
|
"shmat",
|
||||||
|
"shmctl",
|
||||||
|
"shmdt",
|
||||||
|
"shmget",
|
||||||
|
"shutdown",
|
||||||
|
"sigaltstack",
|
||||||
|
"signalfd",
|
||||||
|
"signalfd4",
|
||||||
|
"sigreturn",
|
||||||
|
"socket",
|
||||||
|
"socketcall",
|
||||||
|
"socketpair",
|
||||||
|
"splice",
|
||||||
|
"stat",
|
||||||
|
"stat64",
|
||||||
|
"statfs",
|
||||||
|
"statfs64",
|
||||||
|
"symlink",
|
||||||
|
"symlinkat",
|
||||||
|
"sync",
|
||||||
|
"sync_file_range",
|
||||||
|
"syncfs",
|
||||||
|
"sysinfo",
|
||||||
|
"syslog",
|
||||||
|
"tee",
|
||||||
|
"tgkill",
|
||||||
|
"time",
|
||||||
|
"timer_create",
|
||||||
|
"timer_delete",
|
||||||
|
"timerfd_create",
|
||||||
|
"timerfd_gettime",
|
||||||
|
"timerfd_settime",
|
||||||
|
"timer_getoverrun",
|
||||||
|
"timer_gettime",
|
||||||
|
"timer_settime",
|
||||||
|
"times",
|
||||||
|
"tkill",
|
||||||
|
"truncate",
|
||||||
|
"truncate64",
|
||||||
|
"ugetrlimit",
|
||||||
|
"umask",
|
||||||
|
"uname",
|
||||||
|
"unlink",
|
||||||
|
"unlinkat",
|
||||||
|
"utime",
|
||||||
|
"utimensat",
|
||||||
|
"utimes",
|
||||||
|
"vfork",
|
||||||
|
"vmsplice",
|
||||||
|
"wait4",
|
||||||
|
"waitid",
|
||||||
|
"waitpid",
|
||||||
|
"write",
|
||||||
|
"writev",
|
||||||
|
},
|
||||||
|
Action: rspec.ActAllow,
|
||||||
|
Args: []rspec.LinuxSeccompArg{},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
Names: []string{"personality"},
|
||||||
|
Action: rspec.ActAllow,
|
||||||
|
Args: []rspec.LinuxSeccompArg{
|
||||||
|
{
|
||||||
|
Index: 0,
|
||||||
|
Value: 0x0,
|
||||||
|
Op: rspec.OpEqualTo,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
Index: 0,
|
||||||
|
Value: 0x0008,
|
||||||
|
Op: rspec.OpEqualTo,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
Index: 0,
|
||||||
|
Value: 0xffffffff,
|
||||||
|
Op: rspec.OpEqualTo,
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
}
|
||||||
|
var sysCloneFlagsIndex uint
|
||||||
|
|
||||||
|
capSysAdmin := false
|
||||||
|
var cap string
|
||||||
|
var caps []string
|
||||||
|
|
||||||
|
for _, cap = range rs.Process.Capabilities.Bounding {
|
||||||
|
caps = append(caps, cap)
|
||||||
|
}
|
||||||
|
for _, cap = range rs.Process.Capabilities.Effective {
|
||||||
|
caps = append(caps, cap)
|
||||||
|
}
|
||||||
|
for _, cap = range rs.Process.Capabilities.Inheritable {
|
||||||
|
caps = append(caps, cap)
|
||||||
|
}
|
||||||
|
for _, cap = range rs.Process.Capabilities.Permitted {
|
||||||
|
caps = append(caps, cap)
|
||||||
|
}
|
||||||
|
for _, cap = range rs.Process.Capabilities.Ambient {
|
||||||
|
caps = append(caps, cap)
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, cap = range caps {
|
||||||
|
switch cap {
|
||||||
|
case "CAP_DAC_READ_SEARCH":
|
||||||
|
syscalls = append(syscalls, []rspec.LinuxSyscall{
|
||||||
|
{
|
||||||
|
Names: []string{"open_by_handle_at"},
|
||||||
|
Action: rspec.ActAllow,
|
||||||
|
Args: []rspec.LinuxSeccompArg{},
|
||||||
|
},
|
||||||
|
}...)
|
||||||
|
case "CAP_SYS_ADMIN":
|
||||||
|
capSysAdmin = true
|
||||||
|
syscalls = append(syscalls, []rspec.LinuxSyscall{
|
||||||
|
{
|
||||||
|
Names: []string{
|
||||||
|
"bpf",
|
||||||
|
"clone",
|
||||||
|
"fanotify_init",
|
||||||
|
"lookup_dcookie",
|
||||||
|
"mount",
|
||||||
|
"name_to_handle_at",
|
||||||
|
"perf_event_open",
|
||||||
|
"setdomainname",
|
||||||
|
"sethostname",
|
||||||
|
"setns",
|
||||||
|
"umount",
|
||||||
|
"umount2",
|
||||||
|
"unshare",
|
||||||
|
},
|
||||||
|
Action: rspec.ActAllow,
|
||||||
|
Args: []rspec.LinuxSeccompArg{},
|
||||||
|
},
|
||||||
|
}...)
|
||||||
|
case "CAP_SYS_BOOT":
|
||||||
|
syscalls = append(syscalls, []rspec.LinuxSyscall{
|
||||||
|
{
|
||||||
|
Names: []string{"reboot"},
|
||||||
|
Action: rspec.ActAllow,
|
||||||
|
Args: []rspec.LinuxSeccompArg{},
|
||||||
|
},
|
||||||
|
}...)
|
||||||
|
case "CAP_SYS_CHROOT":
|
||||||
|
syscalls = append(syscalls, []rspec.LinuxSyscall{
|
||||||
|
{
|
||||||
|
Names: []string{"chroot"},
|
||||||
|
Action: rspec.ActAllow,
|
||||||
|
Args: []rspec.LinuxSeccompArg{},
|
||||||
|
},
|
||||||
|
}...)
|
||||||
|
case "CAP_SYS_MODULE":
|
||||||
|
syscalls = append(syscalls, []rspec.LinuxSyscall{
|
||||||
|
{
|
||||||
|
Names: []string{
|
||||||
|
"delete_module",
|
||||||
|
"init_module",
|
||||||
|
"finit_module",
|
||||||
|
"query_module",
|
||||||
|
},
|
||||||
|
Action: rspec.ActAllow,
|
||||||
|
Args: []rspec.LinuxSeccompArg{},
|
||||||
|
},
|
||||||
|
}...)
|
||||||
|
case "CAP_SYS_PACCT":
|
||||||
|
syscalls = append(syscalls, []rspec.LinuxSyscall{
|
||||||
|
{
|
||||||
|
Names: []string{"acct"},
|
||||||
|
Action: rspec.ActAllow,
|
||||||
|
Args: []rspec.LinuxSeccompArg{},
|
||||||
|
},
|
||||||
|
}...)
|
||||||
|
case "CAP_SYS_PTRACE":
|
||||||
|
syscalls = append(syscalls, []rspec.LinuxSyscall{
|
||||||
|
{
|
||||||
|
Names: []string{
|
||||||
|
"kcmp",
|
||||||
|
"process_vm_readv",
|
||||||
|
"process_vm_writev",
|
||||||
|
"ptrace",
|
||||||
|
},
|
||||||
|
Action: rspec.ActAllow,
|
||||||
|
Args: []rspec.LinuxSeccompArg{},
|
||||||
|
},
|
||||||
|
}...)
|
||||||
|
case "CAP_SYS_RAWIO":
|
||||||
|
syscalls = append(syscalls, []rspec.LinuxSyscall{
|
||||||
|
{
|
||||||
|
Names: []string{
|
||||||
|
"iopl",
|
||||||
|
"ioperm",
|
||||||
|
},
|
||||||
|
Action: rspec.ActAllow,
|
||||||
|
Args: []rspec.LinuxSeccompArg{},
|
||||||
|
},
|
||||||
|
}...)
|
||||||
|
case "CAP_SYS_TIME":
|
||||||
|
syscalls = append(syscalls, []rspec.LinuxSyscall{
|
||||||
|
{
|
||||||
|
Names: []string{
|
||||||
|
"settimeofday",
|
||||||
|
"stime",
|
||||||
|
"adjtimex",
|
||||||
|
},
|
||||||
|
Action: rspec.ActAllow,
|
||||||
|
Args: []rspec.LinuxSeccompArg{},
|
||||||
|
},
|
||||||
|
}...)
|
||||||
|
case "CAP_SYS_TTY_CONFIG":
|
||||||
|
syscalls = append(syscalls, []rspec.LinuxSyscall{
|
||||||
|
{
|
||||||
|
Names: []string{"vhangup"},
|
||||||
|
Action: rspec.ActAllow,
|
||||||
|
Args: []rspec.LinuxSeccompArg{},
|
||||||
|
},
|
||||||
|
}...)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if !capSysAdmin {
|
||||||
|
syscalls = append(syscalls, []rspec.LinuxSyscall{
|
||||||
|
{
|
||||||
|
Names: []string{"clone"},
|
||||||
|
Action: rspec.ActAllow,
|
||||||
|
Args: []rspec.LinuxSeccompArg{
|
||||||
|
{
|
||||||
|
Index: sysCloneFlagsIndex,
|
||||||
|
Value: syscall.CLONE_NEWNS | syscall.CLONE_NEWUTS | syscall.CLONE_NEWIPC | syscall.CLONE_NEWUSER | syscall.CLONE_NEWPID | syscall.CLONE_NEWNET,
|
||||||
|
ValueTwo: 0,
|
||||||
|
Op: rspec.OpMaskedEqual,
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
}...)
|
||||||
|
|
||||||
|
}
|
||||||
|
|
||||||
|
arch := runtime.GOARCH
|
||||||
|
switch arch {
|
||||||
|
case "arm", "arm64":
|
||||||
|
syscalls = append(syscalls, []rspec.LinuxSyscall{
|
||||||
|
{
|
||||||
|
Names: []string{
|
||||||
|
"breakpoint",
|
||||||
|
"cacheflush",
|
||||||
|
"set_tls",
|
||||||
|
},
|
||||||
|
Action: rspec.ActAllow,
|
||||||
|
Args: []rspec.LinuxSeccompArg{},
|
||||||
|
},
|
||||||
|
}...)
|
||||||
|
case "amd64", "x32":
|
||||||
|
syscalls = append(syscalls, []rspec.LinuxSyscall{
|
||||||
|
{
|
||||||
|
Names: []string{"arch_prctl"},
|
||||||
|
Action: rspec.ActAllow,
|
||||||
|
Args: []rspec.LinuxSeccompArg{},
|
||||||
|
},
|
||||||
|
}...)
|
||||||
|
fallthrough
|
||||||
|
case "x86":
|
||||||
|
syscalls = append(syscalls, []rspec.LinuxSyscall{
|
||||||
|
{
|
||||||
|
Names: []string{"modify_ldt"},
|
||||||
|
Action: rspec.ActAllow,
|
||||||
|
Args: []rspec.LinuxSeccompArg{},
|
||||||
|
},
|
||||||
|
}...)
|
||||||
|
case "s390", "s390x":
|
||||||
|
syscalls = append(syscalls, []rspec.LinuxSyscall{
|
||||||
|
{
|
||||||
|
Names: []string{
|
||||||
|
"s390_pci_mmio_read",
|
||||||
|
"s390_pci_mmio_write",
|
||||||
|
"s390_runtime_instr",
|
||||||
|
},
|
||||||
|
Action: rspec.ActAllow,
|
||||||
|
Args: []rspec.LinuxSeccompArg{},
|
||||||
|
},
|
||||||
|
}...)
|
||||||
|
/* Flags parameter of the clone syscall is the 2nd on s390 */
|
||||||
|
}
|
||||||
|
|
||||||
|
return &rspec.LinuxSeccomp{
|
||||||
|
DefaultAction: rspec.ActErrno,
|
||||||
|
Architectures: arches(),
|
||||||
|
Syscalls: syscalls,
|
||||||
|
}
|
||||||
|
}
|
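DefaultProfile derives its whitelist from the capabilities already present in the spec, so Process.Capabilities must be populated before it is called. A hedged sketch that loads an existing runtime config (the config.json path is illustrative) and attaches the generated profile:

package main

import (
	"encoding/json"
	"io/ioutil"

	rspec "github.com/opencontainers/runtime-spec/specs-go"
	"github.com/opencontainers/runtime-tools/generate/seccomp"
)

func main() {
	// Hypothetical: an OCI runtime config with process capabilities filled in.
	data, err := ioutil.ReadFile("config.json")
	if err != nil {
		panic(err)
	}

	var spec rspec.Spec
	if err := json.Unmarshal(data, &spec); err != nil {
		panic(err)
	}

	// Build the capability-aware default whitelist and attach it to the spec.
	profile := seccomp.DefaultProfile(&spec)
	if spec.Linux == nil {
		spec.Linux = &rspec.Linux{}
	}
	spec.Linux.Seccomp = profile
}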
140 vendor/github.com/opencontainers/runtime-tools/generate/seccomp/syscall_compare.go generated vendored Normal file
@@ -0,0 +1,140 @@
|
|||||||
|
package seccomp
|
||||||
|
|
||||||
|
import (
|
||||||
|
"fmt"
|
||||||
|
"reflect"
|
||||||
|
"strconv"
|
||||||
|
"strings"
|
||||||
|
|
||||||
|
rspec "github.com/opencontainers/runtime-spec/specs-go"
|
||||||
|
)
|
||||||
|
|
||||||
|
// Determine if a new syscall rule should be appended, overwrite an existing rule
|
||||||
|
// or if no action should be taken at all
|
||||||
|
func decideCourseOfAction(newSyscall *rspec.LinuxSyscall, syscalls []rspec.LinuxSyscall) (string, error) {
|
||||||
|
ruleForSyscallAlreadyExists := false
|
||||||
|
|
||||||
|
var sliceOfDeterminedActions []string
|
||||||
|
for i, syscall := range syscalls {
|
||||||
|
if sameName(&syscall, newSyscall) {
|
||||||
|
ruleForSyscallAlreadyExists = true
|
||||||
|
|
||||||
|
if identical(newSyscall, &syscall) {
|
||||||
|
sliceOfDeterminedActions = append(sliceOfDeterminedActions, nothing)
|
||||||
|
}
|
||||||
|
|
||||||
|
if sameAction(newSyscall, &syscall) {
|
||||||
|
if bothHaveArgs(newSyscall, &syscall) {
|
||||||
|
sliceOfDeterminedActions = append(sliceOfDeterminedActions, seccompAppend)
|
||||||
|
}
|
||||||
|
if onlyOneHasArgs(newSyscall, &syscall) {
|
||||||
|
if firstParamOnlyHasArgs(newSyscall, &syscall) {
|
||||||
|
sliceOfDeterminedActions = append(sliceOfDeterminedActions, "overwrite:"+strconv.Itoa(i))
|
||||||
|
} else {
|
||||||
|
sliceOfDeterminedActions = append(sliceOfDeterminedActions, nothing)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if !sameAction(newSyscall, &syscall) {
|
||||||
|
if bothHaveArgs(newSyscall, &syscall) {
|
||||||
|
if sameArgs(newSyscall, &syscall) {
|
||||||
|
sliceOfDeterminedActions = append(sliceOfDeterminedActions, "overwrite:"+strconv.Itoa(i))
|
||||||
|
}
|
||||||
|
if !sameArgs(newSyscall, &syscall) {
|
||||||
|
sliceOfDeterminedActions = append(sliceOfDeterminedActions, seccompAppend)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if onlyOneHasArgs(newSyscall, &syscall) {
|
||||||
|
sliceOfDeterminedActions = append(sliceOfDeterminedActions, seccompAppend)
|
||||||
|
}
|
||||||
|
if neitherHasArgs(newSyscall, &syscall) {
|
||||||
|
sliceOfDeterminedActions = append(sliceOfDeterminedActions, "overwrite:"+strconv.Itoa(i))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if !ruleForSyscallAlreadyExists {
|
||||||
|
sliceOfDeterminedActions = append(sliceOfDeterminedActions, seccompAppend)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Nothing has highest priority
|
||||||
|
for _, determinedAction := range sliceOfDeterminedActions {
|
||||||
|
if determinedAction == nothing {
|
||||||
|
return determinedAction, nil
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Overwrite has second highest priority
|
||||||
|
for _, determinedAction := range sliceOfDeterminedActions {
|
||||||
|
if strings.Contains(determinedAction, seccompOverwrite) {
|
||||||
|
return determinedAction, nil
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Append has the lowest priority
|
||||||
|
for _, determinedAction := range sliceOfDeterminedActions {
|
||||||
|
if determinedAction == seccompAppend {
|
||||||
|
return determinedAction, nil
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return "", fmt.Errorf("Trouble determining action: %s", sliceOfDeterminedActions)
|
||||||
|
}
|
||||||
|
|
||||||
|
func hasArguments(config *rspec.LinuxSyscall) bool {
|
||||||
|
nilSyscall := new(rspec.LinuxSyscall)
|
||||||
|
return !sameArgs(nilSyscall, config)
|
||||||
|
}
|
||||||
|
|
||||||
|
func identical(config1, config2 *rspec.LinuxSyscall) bool {
|
||||||
|
return reflect.DeepEqual(config1, config2)
|
||||||
|
}
|
||||||
|
|
||||||
|
func identicalExceptAction(config1, config2 *rspec.LinuxSyscall) bool {
|
||||||
|
samename := sameName(config1, config2)
|
||||||
|
sameAction := sameAction(config1, config2)
|
||||||
|
sameArgs := sameArgs(config1, config2)
|
||||||
|
|
||||||
|
return samename && !sameAction && sameArgs
|
||||||
|
}
|
||||||
|
|
||||||
|
func identicalExceptArgs(config1, config2 *rspec.LinuxSyscall) bool {
|
||||||
|
samename := sameName(config1, config2)
|
||||||
|
sameAction := sameAction(config1, config2)
|
||||||
|
sameArgs := sameArgs(config1, config2)
|
||||||
|
|
||||||
|
return samename && sameAction && !sameArgs
|
||||||
|
}
|
||||||
|
|
||||||
|
func sameName(config1, config2 *rspec.LinuxSyscall) bool {
|
||||||
|
return reflect.DeepEqual(config1.Names, config2.Names)
|
||||||
|
}
|
||||||
|
|
||||||
|
func sameAction(config1, config2 *rspec.LinuxSyscall) bool {
|
||||||
|
return config1.Action == config2.Action
|
||||||
|
}
|
||||||
|
|
||||||
|
func sameArgs(config1, config2 *rspec.LinuxSyscall) bool {
|
||||||
|
return reflect.DeepEqual(config1.Args, config2.Args)
|
||||||
|
}
|
||||||
|
|
||||||
|
func bothHaveArgs(config1, config2 *rspec.LinuxSyscall) bool {
|
||||||
|
return hasArguments(config1) && hasArguments(config2)
|
||||||
|
}
|
||||||
|
|
||||||
|
func onlyOneHasArgs(config1, config2 *rspec.LinuxSyscall) bool {
|
||||||
|
conf1 := hasArguments(config1)
|
||||||
|
conf2 := hasArguments(config2)
|
||||||
|
|
||||||
|
return (conf1 && !conf2) || (!conf1 && conf2)
|
||||||
|
}
|
||||||
|
|
||||||
|
func neitherHasArgs(config1, config2 *rspec.LinuxSyscall) bool {
|
||||||
|
return !hasArguments(config1) && !hasArguments(config2)
|
||||||
|
}
|
||||||
|
|
||||||
|
func firstParamOnlyHasArgs(config1, config2 *rspec.LinuxSyscall) bool {
|
||||||
|
return !hasArguments(config1) && hasArguments(config2)
|
||||||
|
}
|
74
vendor/github.com/opencontainers/runtime-tools/generate/spec.go
generated
vendored
Normal file
74
vendor/github.com/opencontainers/runtime-tools/generate/spec.go
generated
vendored
Normal file
@ -0,0 +1,74 @@
|
|||||||
|
package generate
|
||||||
|
|
||||||
|
import (
|
||||||
|
rspec "github.com/opencontainers/runtime-spec/specs-go"
|
||||||
|
)
|
||||||
|
|
||||||
|
func (g *Generator) initSpec() {
|
||||||
|
if g.spec == nil {
|
||||||
|
g.spec = &rspec.Spec{}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (g *Generator) initSpecAnnotations() {
|
||||||
|
g.initSpec()
|
||||||
|
if g.spec.Annotations == nil {
|
||||||
|
g.spec.Annotations = make(map[string]string)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (g *Generator) initSpecLinux() {
|
||||||
|
g.initSpec()
|
||||||
|
if g.spec.Linux == nil {
|
||||||
|
g.spec.Linux = &rspec.Linux{}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (g *Generator) initSpecLinuxSysctl() {
|
||||||
|
g.initSpecLinux()
|
||||||
|
if g.spec.Linux.Sysctl == nil {
|
||||||
|
g.spec.Linux.Sysctl = make(map[string]string)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (g *Generator) initSpecLinuxSeccomp() {
|
||||||
|
g.initSpecLinux()
|
||||||
|
if g.spec.Linux.Seccomp == nil {
|
||||||
|
g.spec.Linux.Seccomp = &rspec.LinuxSeccomp{}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (g *Generator) initSpecLinuxResources() {
|
||||||
|
g.initSpecLinux()
|
||||||
|
if g.spec.Linux.Resources == nil {
|
||||||
|
g.spec.Linux.Resources = &rspec.LinuxResources{}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (g *Generator) initSpecLinuxResourcesCPU() {
|
||||||
|
g.initSpecLinuxResources()
|
||||||
|
if g.spec.Linux.Resources.CPU == nil {
|
||||||
|
g.spec.Linux.Resources.CPU = &rspec.LinuxCPU{}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (g *Generator) initSpecLinuxResourcesMemory() {
|
||||||
|
g.initSpecLinuxResources()
|
||||||
|
if g.spec.Linux.Resources.Memory == nil {
|
||||||
|
g.spec.Linux.Resources.Memory = &rspec.LinuxMemory{}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (g *Generator) initSpecLinuxResourcesNetwork() {
|
||||||
|
g.initSpecLinuxResources()
|
||||||
|
if g.spec.Linux.Resources.Network == nil {
|
||||||
|
g.spec.Linux.Resources.Network = &rspec.LinuxNetwork{}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (g *Generator) initSpecLinuxResourcesPids() {
|
||||||
|
g.initSpecLinuxResources()
|
||||||
|
if g.spec.Linux.Resources.Pids == nil {
|
||||||
|
g.spec.Linux.Resources.Pids = &rspec.LinuxPids{}
|
||||||
|
}
|
||||||
|
}
|
815
vendor/github.com/opencontainers/runtime-tools/validate/validate.go
generated
vendored
Normal file
815
vendor/github.com/opencontainers/runtime-tools/validate/validate.go
generated
vendored
Normal file
@ -0,0 +1,815 @@
|
|||||||
|
package validate
|
||||||
|
|
||||||
|
import (
|
||||||
|
"bufio"
|
||||||
|
"encoding/json"
|
||||||
|
"fmt"
|
||||||
|
"io/ioutil"
|
||||||
|
"net"
|
||||||
|
"os"
|
||||||
|
"path/filepath"
|
||||||
|
"reflect"
|
||||||
|
"strings"
|
||||||
|
"unicode"
|
||||||
|
"unicode/utf8"
|
||||||
|
|
||||||
|
"github.com/Sirupsen/logrus"
|
||||||
|
"github.com/blang/semver"
|
||||||
|
rspec "github.com/opencontainers/runtime-spec/specs-go"
|
||||||
|
"github.com/syndtr/gocapability/capability"
|
||||||
|
)
|
||||||
|
|
||||||
|
const specConfig = "config.json"
|
||||||
|
|
||||||
|
var (
|
||||||
|
defaultRlimits = []string{
|
||||||
|
"RLIMIT_AS",
|
||||||
|
"RLIMIT_CORE",
|
||||||
|
"RLIMIT_CPU",
|
||||||
|
"RLIMIT_DATA",
|
||||||
|
"RLIMIT_FSIZE",
|
||||||
|
"RLIMIT_LOCKS",
|
||||||
|
"RLIMIT_MEMLOCK",
|
||||||
|
"RLIMIT_MSGQUEUE",
|
||||||
|
"RLIMIT_NICE",
|
||||||
|
"RLIMIT_NOFILE",
|
||||||
|
"RLIMIT_NPROC",
|
||||||
|
"RLIMIT_RSS",
|
||||||
|
"RLIMIT_RTPRIO",
|
||||||
|
"RLIMIT_RTTIME",
|
||||||
|
"RLIMIT_SIGPENDING",
|
||||||
|
"RLIMIT_STACK",
|
||||||
|
}
|
||||||
|
)
|
||||||
|
|
||||||
|
// Validator represents a validator for runtime bundle
|
||||||
|
type Validator struct {
|
||||||
|
spec *rspec.Spec
|
||||||
|
bundlePath string
|
||||||
|
HostSpecific bool
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewValidator creates a Validator
|
||||||
|
func NewValidator(spec *rspec.Spec, bundlePath string, hostSpecific bool) Validator {
|
||||||
|
return Validator{spec: spec, bundlePath: bundlePath, HostSpecific: hostSpecific}
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewValidatorFromPath creates a Validator with specified bundle path
|
||||||
|
func NewValidatorFromPath(bundlePath string, hostSpecific bool) (Validator, error) {
|
||||||
|
if bundlePath == "" {
|
||||||
|
return Validator{}, fmt.Errorf("Bundle path shouldn't be empty")
|
||||||
|
}
|
||||||
|
|
||||||
|
if _, err := os.Stat(bundlePath); err != nil {
|
||||||
|
return Validator{}, err
|
||||||
|
}
|
||||||
|
|
||||||
|
configPath := filepath.Join(bundlePath, specConfig)
|
||||||
|
content, err := ioutil.ReadFile(configPath)
|
||||||
|
if err != nil {
|
||||||
|
return Validator{}, err
|
||||||
|
}
|
||||||
|
if !utf8.Valid(content) {
|
||||||
|
return Validator{}, fmt.Errorf("%q is not encoded in UTF-8", configPath)
|
||||||
|
}
|
||||||
|
var spec rspec.Spec
|
||||||
|
if err = json.Unmarshal(content, &spec); err != nil {
|
||||||
|
return Validator{}, err
|
||||||
|
}
|
||||||
|
|
||||||
|
return NewValidator(&spec, bundlePath, hostSpecific), nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// CheckAll checks all parts of runtime bundle
|
||||||
|
func (v *Validator) CheckAll() (msgs []string) {
|
||||||
|
msgs = append(msgs, v.CheckRootfsPath()...)
|
||||||
|
msgs = append(msgs, v.CheckMandatoryFields()...)
|
||||||
|
msgs = append(msgs, v.CheckSemVer()...)
|
||||||
|
msgs = append(msgs, v.CheckMounts()...)
|
||||||
|
msgs = append(msgs, v.CheckPlatform()...)
|
||||||
|
msgs = append(msgs, v.CheckProcess()...)
|
||||||
|
msgs = append(msgs, v.CheckOS()...)
|
||||||
|
msgs = append(msgs, v.CheckLinux()...)
|
||||||
|
msgs = append(msgs, v.CheckHooks()...)
|
||||||
|
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
// CheckRootfsPath checks status of v.spec.Root.Path
|
||||||
|
func (v *Validator) CheckRootfsPath() (msgs []string) {
|
||||||
|
logrus.Debugf("check rootfs path")
|
||||||
|
|
||||||
|
absBundlePath, err := filepath.Abs(v.bundlePath)
|
||||||
|
if err != nil {
|
||||||
|
msgs = append(msgs, fmt.Sprintf("unable to convert %q to an absolute path", v.bundlePath))
|
||||||
|
}
|
||||||
|
|
||||||
|
var rootfsPath string
|
||||||
|
var absRootPath string
|
||||||
|
if filepath.IsAbs(v.spec.Root.Path) {
|
||||||
|
rootfsPath = v.spec.Root.Path
|
||||||
|
absRootPath = filepath.Clean(rootfsPath)
|
||||||
|
} else {
|
||||||
|
var err error
|
||||||
|
rootfsPath = filepath.Join(v.bundlePath, v.spec.Root.Path)
|
||||||
|
absRootPath, err = filepath.Abs(rootfsPath)
|
||||||
|
if err != nil {
|
||||||
|
msgs = append(msgs, fmt.Sprintf("unable to convert %q to an absolute path", rootfsPath))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if fi, err := os.Stat(rootfsPath); err != nil {
|
||||||
|
msgs = append(msgs, fmt.Sprintf("Cannot find the root path %q", rootfsPath))
|
||||||
|
} else if !fi.IsDir() {
|
||||||
|
msgs = append(msgs, fmt.Sprintf("The root path %q is not a directory.", rootfsPath))
|
||||||
|
}
|
||||||
|
|
||||||
|
rootParent := filepath.Dir(absRootPath)
|
||||||
|
if absRootPath == string(filepath.Separator) || rootParent != absBundlePath {
|
||||||
|
msgs = append(msgs, fmt.Sprintf("root.path is %q, but it MUST be a child of %q", v.spec.Root.Path, absBundlePath))
|
||||||
|
}
|
||||||
|
|
||||||
|
return
|
||||||
|
|
||||||
|
}
|
||||||
|
|
||||||
|
// CheckSemVer checks v.spec.Version
|
||||||
|
func (v *Validator) CheckSemVer() (msgs []string) {
|
||||||
|
logrus.Debugf("check semver")
|
||||||
|
|
||||||
|
version := v.spec.Version
|
||||||
|
_, err := semver.Parse(version)
|
||||||
|
if err != nil {
|
||||||
|
msgs = append(msgs, fmt.Sprintf("%q is not valid SemVer: %s", version, err.Error()))
|
||||||
|
}
|
||||||
|
if version != rspec.Version {
|
||||||
|
msgs = append(msgs, fmt.Sprintf("internal error: validate currently only handles version %s, but the supplied configuration targets %s", rspec.Version, version))
|
||||||
|
}
|
||||||
|
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
// CheckPlatform checks v.spec.Platform
|
||||||
|
func (v *Validator) CheckPlatform() (msgs []string) {
|
||||||
|
logrus.Debugf("check platform")
|
||||||
|
|
||||||
|
validCombins := map[string][]string{
|
||||||
|
"android": {"arm"},
|
||||||
|
"darwin": {"386", "amd64", "arm", "arm64"},
|
||||||
|
"dragonfly": {"amd64"},
|
||||||
|
"freebsd": {"386", "amd64", "arm"},
|
||||||
|
"linux": {"386", "amd64", "arm", "arm64", "ppc64", "ppc64le", "mips64", "mips64le", "s390x"},
|
||||||
|
"netbsd": {"386", "amd64", "arm"},
|
||||||
|
"openbsd": {"386", "amd64", "arm"},
|
||||||
|
"plan9": {"386", "amd64"},
|
||||||
|
"solaris": {"amd64"},
|
||||||
|
"windows": {"386", "amd64"}}
|
||||||
|
platform := v.spec.Platform
|
||||||
|
for os, archs := range validCombins {
|
||||||
|
if os == platform.OS {
|
||||||
|
for _, arch := range archs {
|
||||||
|
if arch == platform.Arch {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
}
|
||||||
|
msgs = append(msgs, fmt.Sprintf("Combination of %q and %q is invalid.", platform.OS, platform.Arch))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
msgs = append(msgs, fmt.Sprintf("Operation system %q of the bundle is not supported yet.", platform.OS))
|
||||||
|
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
// CheckHooks check v.spec.Hooks
|
||||||
|
func (v *Validator) CheckHooks() (msgs []string) {
|
||||||
|
logrus.Debugf("check hooks")
|
||||||
|
|
||||||
|
if v.spec.Hooks != nil {
|
||||||
|
msgs = append(msgs, checkEventHooks("pre-start", v.spec.Hooks.Prestart, v.HostSpecific)...)
|
||||||
|
msgs = append(msgs, checkEventHooks("post-start", v.spec.Hooks.Poststart, v.HostSpecific)...)
|
||||||
|
msgs = append(msgs, checkEventHooks("post-stop", v.spec.Hooks.Poststop, v.HostSpecific)...)
|
||||||
|
}
|
||||||
|
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
func checkEventHooks(hookType string, hooks []rspec.Hook, hostSpecific bool) (msgs []string) {
|
||||||
|
for _, hook := range hooks {
|
||||||
|
if !filepath.IsAbs(hook.Path) {
|
||||||
|
msgs = append(msgs, fmt.Sprintf("The %s hook %v: is not absolute path", hookType, hook.Path))
|
||||||
|
}
|
||||||
|
|
||||||
|
if hostSpecific {
|
||||||
|
fi, err := os.Stat(hook.Path)
|
||||||
|
if err != nil {
|
||||||
|
msgs = append(msgs, fmt.Sprintf("Cannot find %s hook: %v", hookType, hook.Path))
|
||||||
|
}
|
||||||
|
if fi.Mode()&0111 == 0 {
|
||||||
|
msgs = append(msgs, fmt.Sprintf("The %s hook %v: is not executable", hookType, hook.Path))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, env := range hook.Env {
|
||||||
|
if !envValid(env) {
|
||||||
|
msgs = append(msgs, fmt.Sprintf("Env %q for hook %v is in the invalid form.", env, hook.Path))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
// CheckProcess checks v.spec.Process
|
||||||
|
func (v *Validator) CheckProcess() (msgs []string) {
|
||||||
|
logrus.Debugf("check process")
|
||||||
|
|
||||||
|
process := v.spec.Process
|
||||||
|
if !filepath.IsAbs(process.Cwd) {
|
||||||
|
msgs = append(msgs, fmt.Sprintf("cwd %q is not an absolute path", process.Cwd))
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, env := range process.Env {
|
||||||
|
if !envValid(env) {
|
||||||
|
msgs = append(msgs, fmt.Sprintf("env %q should be in the form of 'key=value'. The left hand side must consist solely of letters, digits, and underscores '_'.", env))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if len(process.Args) == 0 {
|
||||||
|
msgs = append(msgs, fmt.Sprintf("args must not be empty"))
|
||||||
|
} else {
|
||||||
|
if filepath.IsAbs(process.Args[0]) {
|
||||||
|
var rootfsPath string
|
||||||
|
if filepath.IsAbs(v.spec.Root.Path) {
|
||||||
|
rootfsPath = v.spec.Root.Path
|
||||||
|
} else {
|
||||||
|
rootfsPath = filepath.Join(v.bundlePath, v.spec.Root.Path)
|
||||||
|
}
|
||||||
|
absPath := filepath.Join(rootfsPath, process.Args[0])
|
||||||
|
fileinfo, err := os.Stat(absPath)
|
||||||
|
if os.IsNotExist(err) {
|
||||||
|
logrus.Warnf("executable %q is not available in rootfs currently", process.Args[0])
|
||||||
|
} else if err != nil {
|
||||||
|
msgs = append(msgs, err.Error())
|
||||||
|
} else {
|
||||||
|
m := fileinfo.Mode()
|
||||||
|
if m.IsDir() || m&0111 == 0 {
|
||||||
|
msgs = append(msgs, fmt.Sprintf("arg %q is not executable", process.Args[0]))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
msgs = append(msgs, v.CheckCapablities()...)
|
||||||
|
msgs = append(msgs, v.CheckRlimits()...)
|
||||||
|
|
||||||
|
if v.spec.Platform.OS == "linux" {
|
||||||
|
|
||||||
|
if len(process.ApparmorProfile) > 0 {
|
||||||
|
profilePath := filepath.Join(v.bundlePath, v.spec.Root.Path, "/etc/apparmor.d", process.ApparmorProfile)
|
||||||
|
_, err := os.Stat(profilePath)
|
||||||
|
if err != nil {
|
||||||
|
msgs = append(msgs, err.Error())
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
func (v *Validator) CheckCapablities() (msgs []string) {
|
||||||
|
process := v.spec.Process
|
||||||
|
if v.spec.Platform.OS == "linux" {
|
||||||
|
var caps []string
|
||||||
|
|
||||||
|
for _, cap := range process.Capabilities.Bounding {
|
||||||
|
caps = append(caps, cap)
|
||||||
|
}
|
||||||
|
for _, cap := range process.Capabilities.Effective {
|
||||||
|
caps = append(caps, cap)
|
||||||
|
}
|
||||||
|
for _, cap := range process.Capabilities.Inheritable {
|
||||||
|
caps = append(caps, cap)
|
||||||
|
}
|
||||||
|
for _, cap := range process.Capabilities.Permitted {
|
||||||
|
caps = append(caps, cap)
|
||||||
|
}
|
||||||
|
for _, cap := range process.Capabilities.Ambient {
|
||||||
|
caps = append(caps, cap)
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, capability := range caps {
|
||||||
|
if err := CapValid(capability, v.HostSpecific); err != nil {
|
||||||
|
msgs = append(msgs, fmt.Sprintf("capability %q is not valid, man capabilities(7)", capability))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
logrus.Warnf("process.capabilities validation not yet implemented for OS %q", v.spec.Platform.OS)
|
||||||
|
}
|
||||||
|
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
func (v *Validator) CheckRlimits() (msgs []string) {
|
||||||
|
process := v.spec.Process
|
||||||
|
for index, rlimit := range process.Rlimits {
|
||||||
|
for i := index + 1; i < len(process.Rlimits); i++ {
|
||||||
|
if process.Rlimits[index].Type == process.Rlimits[i].Type {
|
||||||
|
msgs = append(msgs, fmt.Sprintf("rlimit can not contain the same type %q.", process.Rlimits[index].Type))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if v.spec.Platform.OS == "linux" {
|
||||||
|
if err := rlimitValid(rlimit); err != nil {
|
||||||
|
msgs = append(msgs, err.Error())
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
logrus.Warnf("process.rlimits validation not yet implemented for OS %q", v.spec.Platform.OS)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
func supportedMountTypes(OS string, hostSpecific bool) (map[string]bool, error) {
|
||||||
|
supportedTypes := make(map[string]bool)
|
||||||
|
|
||||||
|
if OS != "linux" && OS != "windows" {
|
||||||
|
logrus.Warnf("%v is not supported to check mount type", OS)
|
||||||
|
return nil, nil
|
||||||
|
} else if OS == "windows" {
|
||||||
|
supportedTypes["ntfs"] = true
|
||||||
|
return supportedTypes, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
if hostSpecific {
|
||||||
|
f, err := os.Open("/proc/filesystems")
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
defer f.Close()
|
||||||
|
|
||||||
|
s := bufio.NewScanner(f)
|
||||||
|
for s.Scan() {
|
||||||
|
if err := s.Err(); err != nil {
|
||||||
|
return supportedTypes, err
|
||||||
|
}
|
||||||
|
|
||||||
|
text := s.Text()
|
||||||
|
parts := strings.Split(text, "\t")
|
||||||
|
if len(parts) > 1 {
|
||||||
|
supportedTypes[parts[1]] = true
|
||||||
|
} else {
|
||||||
|
supportedTypes[parts[0]] = true
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
supportedTypes["bind"] = true
|
||||||
|
|
||||||
|
return supportedTypes, nil
|
||||||
|
}
|
||||||
|
logrus.Warn("Checking linux mount types without --host-specific is not supported yet")
|
||||||
|
return nil, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// CheckMounts checks v.spec.Mounts
|
||||||
|
func (v *Validator) CheckMounts() (msgs []string) {
|
||||||
|
logrus.Debugf("check mounts")
|
||||||
|
|
||||||
|
supportedTypes, err := supportedMountTypes(v.spec.Platform.OS, v.HostSpecific)
|
||||||
|
if err != nil {
|
||||||
|
msgs = append(msgs, err.Error())
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
if supportedTypes != nil {
|
||||||
|
for _, mount := range v.spec.Mounts {
|
||||||
|
if !supportedTypes[mount.Type] {
|
||||||
|
msgs = append(msgs, fmt.Sprintf("Unsupported mount type %q", mount.Type))
|
||||||
|
}
|
||||||
|
|
||||||
|
if !filepath.IsAbs(mount.Destination) {
|
||||||
|
msgs = append(msgs, fmt.Sprintf("destination %v is not an absolute path", mount.Destination))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
// CheckOS checks v.spec.Platform.OS
|
||||||
|
func (v *Validator) CheckOS() (msgs []string) {
|
||||||
|
logrus.Debugf("check os")
|
||||||
|
|
||||||
|
if v.spec.Platform.OS != "linux" {
|
||||||
|
if v.spec.Linux != nil {
|
||||||
|
msgs = append(msgs, fmt.Sprintf("'linux' MUST NOT be set when platform.os is %q", v.spec.Platform.OS))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if v.spec.Platform.OS != "solaris" {
|
||||||
|
if v.spec.Solaris != nil {
|
||||||
|
msgs = append(msgs, fmt.Sprintf("'solaris' MUST NOT be set when platform.os is %q", v.spec.Platform.OS))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if v.spec.Platform.OS != "windows" {
|
||||||
|
if v.spec.Windows != nil {
|
||||||
|
msgs = append(msgs, fmt.Sprintf("'windows' MUST NOT be set when platform.os is %q", v.spec.Platform.OS))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
// CheckLinux checks v.spec.Linux
|
||||||
|
func (v *Validator) CheckLinux() (msgs []string) {
|
||||||
|
logrus.Debugf("check linux")
|
||||||
|
|
||||||
|
var typeList = map[rspec.LinuxNamespaceType]struct {
|
||||||
|
num int
|
||||||
|
newExist bool
|
||||||
|
}{
|
||||||
|
rspec.PIDNamespace: {0, false},
|
||||||
|
rspec.NetworkNamespace: {0, false},
|
||||||
|
rspec.MountNamespace: {0, false},
|
||||||
|
rspec.IPCNamespace: {0, false},
|
||||||
|
rspec.UTSNamespace: {0, false},
|
||||||
|
rspec.UserNamespace: {0, false},
|
||||||
|
rspec.CgroupNamespace: {0, false},
|
||||||
|
}
|
||||||
|
|
||||||
|
for index := 0; index < len(v.spec.Linux.Namespaces); index++ {
|
||||||
|
ns := v.spec.Linux.Namespaces[index]
|
||||||
|
if !namespaceValid(ns) {
|
||||||
|
msgs = append(msgs, fmt.Sprintf("namespace %v is invalid.", ns))
|
||||||
|
}
|
||||||
|
|
||||||
|
tmpItem := typeList[ns.Type]
|
||||||
|
tmpItem.num = tmpItem.num + 1
|
||||||
|
if tmpItem.num > 1 {
|
||||||
|
msgs = append(msgs, fmt.Sprintf("duplicated namespace %q", ns.Type))
|
||||||
|
}
|
||||||
|
|
||||||
|
if len(ns.Path) == 0 {
|
||||||
|
tmpItem.newExist = true
|
||||||
|
}
|
||||||
|
typeList[ns.Type] = tmpItem
|
||||||
|
}
|
||||||
|
|
||||||
|
if (len(v.spec.Linux.UIDMappings) > 0 || len(v.spec.Linux.GIDMappings) > 0) && !typeList[rspec.UserNamespace].newExist {
|
||||||
|
msgs = append(msgs, "UID/GID mappings requires a new User namespace to be specified as well")
|
||||||
|
} else if len(v.spec.Linux.UIDMappings) > 5 {
|
||||||
|
msgs = append(msgs, "Only 5 UID mappings are allowed (linux kernel restriction).")
|
||||||
|
} else if len(v.spec.Linux.GIDMappings) > 5 {
|
||||||
|
msgs = append(msgs, "Only 5 GID mappings are allowed (linux kernel restriction).")
|
||||||
|
}
|
||||||
|
|
||||||
|
for k := range v.spec.Linux.Sysctl {
|
||||||
|
if strings.HasPrefix(k, "net.") && !typeList[rspec.NetworkNamespace].newExist {
|
||||||
|
msgs = append(msgs, fmt.Sprintf("Sysctl %v requires a new Network namespace to be specified as well", k))
|
||||||
|
}
|
||||||
|
if strings.HasPrefix(k, "fs.mqueue.") {
|
||||||
|
if !typeList[rspec.MountNamespace].newExist || !typeList[rspec.IPCNamespace].newExist {
|
||||||
|
msgs = append(msgs, fmt.Sprintf("Sysctl %v requires a new IPC namespace and Mount namespace to be specified as well", k))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if v.spec.Platform.OS == "linux" && !typeList[rspec.UTSNamespace].newExist && v.spec.Hostname != "" {
|
||||||
|
msgs = append(msgs, fmt.Sprintf("On Linux, hostname requires a new UTS namespace to be specified as well"))
|
||||||
|
}
|
||||||
|
|
||||||
|
for index := 0; index < len(v.spec.Linux.Devices); index++ {
|
||||||
|
if !deviceValid(v.spec.Linux.Devices[index]) {
|
||||||
|
msgs = append(msgs, fmt.Sprintf("device %v is invalid.", v.spec.Linux.Devices[index]))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if v.spec.Linux.Resources != nil {
|
||||||
|
ms := v.CheckLinuxResources()
|
||||||
|
msgs = append(msgs, ms...)
|
||||||
|
}
|
||||||
|
|
||||||
|
if v.spec.Linux.Seccomp != nil {
|
||||||
|
ms := v.CheckSeccomp()
|
||||||
|
msgs = append(msgs, ms...)
|
||||||
|
}
|
||||||
|
|
||||||
|
switch v.spec.Linux.RootfsPropagation {
|
||||||
|
case "":
|
||||||
|
case "private":
|
||||||
|
case "rprivate":
|
||||||
|
case "slave":
|
||||||
|
case "rslave":
|
||||||
|
case "shared":
|
||||||
|
case "rshared":
|
||||||
|
default:
|
||||||
|
msgs = append(msgs, "rootfsPropagation must be empty or one of \"private|rprivate|slave|rslave|shared|rshared\"")
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, maskedPath := range v.spec.Linux.MaskedPaths {
|
||||||
|
if !strings.HasPrefix(maskedPath, "/") {
|
||||||
|
msgs = append(msgs, "maskedPath %v is not an absolute path", maskedPath)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, readonlyPath := range v.spec.Linux.ReadonlyPaths {
|
||||||
|
if !strings.HasPrefix(readonlyPath, "/") {
|
||||||
|
msgs = append(msgs, "readonlyPath %v is not an absolute path", readonlyPath)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
// CheckLinuxResources checks v.spec.Linux.Resources
|
||||||
|
func (v *Validator) CheckLinuxResources() (msgs []string) {
|
||||||
|
logrus.Debugf("check linux resources")
|
||||||
|
|
||||||
|
r := v.spec.Linux.Resources
|
||||||
|
if r.Memory != nil {
|
||||||
|
if r.Memory.Limit != nil && r.Memory.Swap != nil && uint64(*r.Memory.Limit) > uint64(*r.Memory.Swap) {
|
||||||
|
msgs = append(msgs, fmt.Sprintf("Minimum memoryswap should be larger than memory limit"))
|
||||||
|
}
|
||||||
|
if r.Memory.Limit != nil && r.Memory.Reservation != nil && uint64(*r.Memory.Reservation) > uint64(*r.Memory.Limit) {
|
||||||
|
msgs = append(msgs, fmt.Sprintf("Minimum memory limit should be larger than memory reservation"))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if r.Network != nil && v.HostSpecific {
|
||||||
|
var exist bool
|
||||||
|
interfaces, err := net.Interfaces()
|
||||||
|
if err != nil {
|
||||||
|
msgs = append(msgs, err.Error())
|
||||||
|
return
|
||||||
|
}
|
||||||
|
for _, prio := range r.Network.Priorities {
|
||||||
|
exist = false
|
||||||
|
for _, ni := range interfaces {
|
||||||
|
if prio.Name == ni.Name {
|
||||||
|
exist = true
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if !exist {
|
||||||
|
msgs = append(msgs, fmt.Sprintf("Interface %s does not exist currently", prio.Name))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
// CheckSeccomp checkc v.spec.Linux.Seccomp
|
||||||
|
func (v *Validator) CheckSeccomp() (msgs []string) {
|
||||||
|
logrus.Debugf("check linux seccomp")
|
||||||
|
|
||||||
|
s := v.spec.Linux.Seccomp
|
||||||
|
if !seccompActionValid(s.DefaultAction) {
|
||||||
|
msgs = append(msgs, fmt.Sprintf("seccomp defaultAction %q is invalid.", s.DefaultAction))
|
||||||
|
}
|
||||||
|
for index := 0; index < len(s.Syscalls); index++ {
|
||||||
|
if !syscallValid(s.Syscalls[index]) {
|
||||||
|
msgs = append(msgs, fmt.Sprintf("syscall %v is invalid.", s.Syscalls[index]))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
for index := 0; index < len(s.Architectures); index++ {
|
||||||
|
switch s.Architectures[index] {
|
||||||
|
case rspec.ArchX86:
|
||||||
|
case rspec.ArchX86_64:
|
||||||
|
case rspec.ArchX32:
|
||||||
|
case rspec.ArchARM:
|
||||||
|
case rspec.ArchAARCH64:
|
||||||
|
case rspec.ArchMIPS:
|
||||||
|
case rspec.ArchMIPS64:
|
||||||
|
case rspec.ArchMIPS64N32:
|
||||||
|
case rspec.ArchMIPSEL:
|
||||||
|
case rspec.ArchMIPSEL64:
|
||||||
|
case rspec.ArchMIPSEL64N32:
|
||||||
|
case rspec.ArchPPC:
|
||||||
|
case rspec.ArchPPC64:
|
||||||
|
case rspec.ArchPPC64LE:
|
||||||
|
case rspec.ArchS390:
|
||||||
|
case rspec.ArchS390X:
|
||||||
|
case rspec.ArchPARISC:
|
||||||
|
case rspec.ArchPARISC64:
|
||||||
|
default:
|
||||||
|
msgs = append(msgs, fmt.Sprintf("seccomp architecture %q is invalid", s.Architectures[index]))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
// CapValid checks whether a capability is valid
|
||||||
|
func CapValid(c string, hostSpecific bool) error {
|
||||||
|
isValid := false
|
||||||
|
|
||||||
|
if !strings.HasPrefix(c, "CAP_") {
|
||||||
|
return fmt.Errorf("capability %s must start with CAP_", c)
|
||||||
|
}
|
||||||
|
for _, cap := range capability.List() {
|
||||||
|
if c == fmt.Sprintf("CAP_%s", strings.ToUpper(cap.String())) {
|
||||||
|
if hostSpecific && cap > LastCap() {
|
||||||
|
return fmt.Errorf("CAP_%s is not supported on the current host", c)
|
||||||
|
}
|
||||||
|
isValid = true
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if !isValid {
|
||||||
|
return fmt.Errorf("Invalid capability: %s", c)
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// LastCap return last cap of system
|
||||||
|
func LastCap() capability.Cap {
|
||||||
|
last := capability.CAP_LAST_CAP
|
||||||
|
// hack for RHEL6 which has no /proc/sys/kernel/cap_last_cap
|
||||||
|
if last == capability.Cap(63) {
|
||||||
|
last = capability.CAP_BLOCK_SUSPEND
|
||||||
|
}
|
||||||
|
|
||||||
|
return last
|
||||||
|
}
|
||||||
|
|
||||||
|
func envValid(env string) bool {
|
||||||
|
items := strings.Split(env, "=")
|
||||||
|
if len(items) < 2 {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
for i, ch := range strings.TrimSpace(items[0]) {
|
||||||
|
if !unicode.IsDigit(ch) && !unicode.IsLetter(ch) && ch != '_' {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
if i == 0 && unicode.IsDigit(ch) {
|
||||||
|
logrus.Warnf("Env %v: variable name beginning with digit is not recommended.", env)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
|
||||||
|
func rlimitValid(rlimit rspec.LinuxRlimit) error {
|
||||||
|
if rlimit.Hard < rlimit.Soft {
|
||||||
|
return fmt.Errorf("hard limit of rlimit %s should not be less than soft limit", rlimit.Type)
|
||||||
|
}
|
||||||
|
for _, val := range defaultRlimits {
|
||||||
|
if val == rlimit.Type {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return fmt.Errorf("rlimit type %q is invalid", rlimit.Type)
|
||||||
|
}
|
||||||
|
|
||||||
|
func namespaceValid(ns rspec.LinuxNamespace) bool {
|
||||||
|
switch ns.Type {
|
||||||
|
case rspec.PIDNamespace:
|
||||||
|
case rspec.NetworkNamespace:
|
||||||
|
case rspec.MountNamespace:
|
||||||
|
case rspec.IPCNamespace:
|
||||||
|
case rspec.UTSNamespace:
|
||||||
|
case rspec.UserNamespace:
|
||||||
|
case rspec.CgroupNamespace:
|
||||||
|
default:
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
|
||||||
|
if ns.Path != "" && !filepath.IsAbs(ns.Path) {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
|
||||||
|
func deviceValid(d rspec.LinuxDevice) bool {
|
||||||
|
switch d.Type {
|
||||||
|
case "b", "c", "u":
|
||||||
|
if d.Major <= 0 || d.Minor <= 0 {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
case "p":
|
||||||
|
if d.Major > 0 || d.Minor > 0 {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
default:
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
|
||||||
|
func seccompActionValid(secc rspec.LinuxSeccompAction) bool {
|
||||||
|
switch secc {
|
||||||
|
case "":
|
||||||
|
case rspec.ActKill:
|
||||||
|
case rspec.ActTrap:
|
||||||
|
case rspec.ActErrno:
|
||||||
|
case rspec.ActTrace:
|
||||||
|
case rspec.ActAllow:
|
||||||
|
default:
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
|
||||||
|
func syscallValid(s rspec.LinuxSyscall) bool {
|
||||||
|
if !seccompActionValid(s.Action) {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
for index := 0; index < len(s.Args); index++ {
|
||||||
|
arg := s.Args[index]
|
||||||
|
switch arg.Op {
|
||||||
|
case rspec.OpNotEqual:
|
||||||
|
case rspec.OpLessThan:
|
||||||
|
case rspec.OpLessEqual:
|
||||||
|
case rspec.OpEqualTo:
|
||||||
|
case rspec.OpGreaterEqual:
|
||||||
|
case rspec.OpGreaterThan:
|
||||||
|
case rspec.OpMaskedEqual:
|
||||||
|
default:
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
|
||||||
|
func isStruct(t reflect.Type) bool {
|
||||||
|
return t.Kind() == reflect.Struct
|
||||||
|
}
|
||||||
|
|
||||||
|
func isStructPtr(t reflect.Type) bool {
|
||||||
|
return t.Kind() == reflect.Ptr && t.Elem().Kind() == reflect.Struct
|
||||||
|
}
|
||||||
|
|
||||||
|
func checkMandatoryUnit(field reflect.Value, tagField reflect.StructField, parent string) (msgs []string) {
|
||||||
|
mandatory := !strings.Contains(tagField.Tag.Get("json"), "omitempty")
|
||||||
|
switch field.Kind() {
|
||||||
|
case reflect.Ptr:
|
||||||
|
if mandatory && field.IsNil() {
|
||||||
|
msgs = append(msgs, fmt.Sprintf("'%s.%s' should not be empty.", parent, tagField.Name))
|
||||||
|
}
|
||||||
|
case reflect.String:
|
||||||
|
if mandatory && (field.Len() == 0) {
|
||||||
|
msgs = append(msgs, fmt.Sprintf("'%s.%s' should not be empty.", parent, tagField.Name))
|
||||||
|
}
|
||||||
|
case reflect.Slice:
|
||||||
|
if mandatory && (field.IsNil() || field.Len() == 0) {
|
||||||
|
msgs = append(msgs, fmt.Sprintf("'%s.%s' should not be empty.", parent, tagField.Name))
|
||||||
|
return
|
||||||
|
}
|
||||||
|
for index := 0; index < field.Len(); index++ {
|
||||||
|
mValue := field.Index(index)
|
||||||
|
if mValue.CanInterface() {
|
||||||
|
msgs = append(msgs, checkMandatory(mValue.Interface())...)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
case reflect.Map:
|
||||||
|
if mandatory && (field.IsNil() || field.Len() == 0) {
|
||||||
|
msgs = append(msgs, fmt.Sprintf("'%s.%s' should not be empty.", parent, tagField.Name))
|
||||||
|
return msgs
|
||||||
|
}
|
||||||
|
keys := field.MapKeys()
|
||||||
|
for index := 0; index < len(keys); index++ {
|
||||||
|
mValue := field.MapIndex(keys[index])
|
||||||
|
if mValue.CanInterface() {
|
||||||
|
msgs = append(msgs, checkMandatory(mValue.Interface())...)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
default:
|
||||||
|
}
|
||||||
|
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
func checkMandatory(obj interface{}) (msgs []string) {
|
||||||
|
objT := reflect.TypeOf(obj)
|
||||||
|
objV := reflect.ValueOf(obj)
|
||||||
|
if isStructPtr(objT) {
|
||||||
|
objT = objT.Elem()
|
||||||
|
objV = objV.Elem()
|
||||||
|
} else if !isStruct(objT) {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
for i := 0; i < objT.NumField(); i++ {
|
||||||
|
t := objT.Field(i).Type
|
||||||
|
if isStructPtr(t) && objV.Field(i).IsNil() {
|
||||||
|
if !strings.Contains(objT.Field(i).Tag.Get("json"), "omitempty") {
|
||||||
|
msgs = append(msgs, fmt.Sprintf("'%s.%s' should not be empty", objT.Name(), objT.Field(i).Name))
|
||||||
|
}
|
||||||
|
} else if (isStruct(t) || isStructPtr(t)) && objV.Field(i).CanInterface() {
|
||||||
|
msgs = append(msgs, checkMandatory(objV.Field(i).Interface())...)
|
||||||
|
} else {
|
||||||
|
msgs = append(msgs, checkMandatoryUnit(objV.Field(i), objT.Field(i), objT.Name())...)
|
||||||
|
}
|
||||||
|
|
||||||
|
}
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
// CheckMandatoryFields checks mandatory field of container's config file
|
||||||
|
func (v *Validator) CheckMandatoryFields() []string {
|
||||||
|
logrus.Debugf("check mandatory fields")
|
||||||
|
|
||||||
|
return checkMandatory(v.spec)
|
||||||
|
}
|
28
vendor/github.com/stretchr/testify/require/doc.go
generated
vendored
Normal file
28
vendor/github.com/stretchr/testify/require/doc.go
generated
vendored
Normal file
@ -0,0 +1,28 @@
|
|||||||
|
// Package require implements the same assertions as the `assert` package but
|
||||||
|
// stops test execution when a test fails.
|
||||||
|
//
|
||||||
|
// Example Usage
|
||||||
|
//
|
||||||
|
// The following is a complete example using require in a standard test function:
|
||||||
|
// import (
|
||||||
|
// "testing"
|
||||||
|
// "github.com/stretchr/testify/require"
|
||||||
|
// )
|
||||||
|
//
|
||||||
|
// func TestSomething(t *testing.T) {
|
||||||
|
//
|
||||||
|
// var a string = "Hello"
|
||||||
|
// var b string = "Hello"
|
||||||
|
//
|
||||||
|
// require.Equal(t, a, b, "The two words should be the same.")
|
||||||
|
//
|
||||||
|
// }
|
||||||
|
//
|
||||||
|
// Assertions
|
||||||
|
//
|
||||||
|
// The `require` package have same global functions as in the `assert` package,
|
||||||
|
// but instead of returning a boolean result they call `t.FailNow()`.
|
||||||
|
//
|
||||||
|
// Every assertion function also takes an optional string message as the final argument,
|
||||||
|
// allowing custom error messages to be appended to the message the assertion method outputs.
|
||||||
|
package require
|
16
vendor/github.com/stretchr/testify/require/forward_requirements.go
generated
vendored
Normal file
16
vendor/github.com/stretchr/testify/require/forward_requirements.go
generated
vendored
Normal file
@ -0,0 +1,16 @@
|
|||||||
|
package require
|
||||||
|
|
||||||
|
// Assertions provides assertion methods around the
|
||||||
|
// TestingT interface.
|
||||||
|
type Assertions struct {
|
||||||
|
t TestingT
|
||||||
|
}
|
||||||
|
|
||||||
|
// New makes a new Assertions object for the specified TestingT.
|
||||||
|
func New(t TestingT) *Assertions {
|
||||||
|
return &Assertions{
|
||||||
|
t: t,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
//go:generate go run ../_codegen/main.go -output-package=require -template=require_forward.go.tmpl
|
423
vendor/github.com/stretchr/testify/require/require.go
generated
vendored
Normal file
423
vendor/github.com/stretchr/testify/require/require.go
generated
vendored
Normal file
@ -0,0 +1,423 @@
|
|||||||
|
/*
|
||||||
|
* CODE GENERATED AUTOMATICALLY WITH github.com/stretchr/testify/_codegen
|
||||||
|
* THIS FILE MUST NOT BE EDITED BY HAND
|
||||||
|
*/
|
||||||
|
|
||||||
|
package require
|
||||||
|
|
||||||
|
import (
|
||||||
|
assert "github.com/stretchr/testify/assert"
|
||||||
|
http "net/http"
|
||||||
|
url "net/url"
|
||||||
|
time "time"
|
||||||
|
)
|
||||||
|
|
||||||
|
// Condition uses a Comparison to assert a complex condition.
|
||||||
|
func Condition(t TestingT, comp assert.Comparison, msgAndArgs ...interface{}) {
|
||||||
|
if !assert.Condition(t, comp, msgAndArgs...) {
|
||||||
|
t.FailNow()
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Contains asserts that the specified string, list(array, slice...) or map contains the
|
||||||
|
// specified substring or element.
|
||||||
|
//
|
||||||
|
// assert.Contains(t, "Hello World", "World", "But 'Hello World' does contain 'World'")
|
||||||
|
// assert.Contains(t, ["Hello", "World"], "World", "But ["Hello", "World"] does contain 'World'")
|
||||||
|
// assert.Contains(t, {"Hello": "World"}, "Hello", "But {'Hello': 'World'} does contain 'Hello'")
|
||||||
|
//
|
||||||
|
// Returns whether the assertion was successful (true) or not (false).
|
||||||
|
func Contains(t TestingT, s interface{}, contains interface{}, msgAndArgs ...interface{}) {
|
||||||
|
if !assert.Contains(t, s, contains, msgAndArgs...) {
|
||||||
|
t.FailNow()
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Empty asserts that the specified object is empty. I.e. nil, "", false, 0 or either
|
||||||
|
// a slice or a channel with len == 0.
|
||||||
|
//
|
||||||
|
// assert.Empty(t, obj)
|
||||||
|
//
|
||||||
|
// Returns whether the assertion was successful (true) or not (false).
|
||||||
|
func Empty(t TestingT, object interface{}, msgAndArgs ...interface{}) {
|
||||||
|
if !assert.Empty(t, object, msgAndArgs...) {
|
||||||
|
t.FailNow()
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Equal asserts that two objects are equal.
|
||||||
|
//
|
||||||
|
// assert.Equal(t, 123, 123, "123 and 123 should be equal")
|
||||||
|
//
|
||||||
|
// Returns whether the assertion was successful (true) or not (false).
|
||||||
|
func Equal(t TestingT, expected interface{}, actual interface{}, msgAndArgs ...interface{}) {
|
||||||
|
if !assert.Equal(t, expected, actual, msgAndArgs...) {
|
||||||
|
t.FailNow()
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// EqualError asserts that a function returned an error (i.e. not `nil`)
|
||||||
|
// and that it is equal to the provided error.
|
||||||
|
//
|
||||||
|
// actualObj, err := SomeFunction()
|
||||||
|
// assert.EqualError(t, err, expectedErrorString, "An error was expected")
|
||||||
|
//
|
||||||
|
// Returns whether the assertion was successful (true) or not (false).
|
||||||
|
func EqualError(t TestingT, theError error, errString string, msgAndArgs ...interface{}) {
|
||||||
|
if !assert.EqualError(t, theError, errString, msgAndArgs...) {
|
||||||
|
t.FailNow()
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// EqualValues asserts that two objects are equal or convertable to the same types
|
||||||
|
// and equal.
|
||||||
|
//
|
||||||
|
// assert.EqualValues(t, uint32(123), int32(123), "123 and 123 should be equal")
|
||||||
|
//
|
||||||
|
// Returns whether the assertion was successful (true) or not (false).
|
||||||
|
func EqualValues(t TestingT, expected interface{}, actual interface{}, msgAndArgs ...interface{}) {
|
||||||
|
if !assert.EqualValues(t, expected, actual, msgAndArgs...) {
|
||||||
|
t.FailNow()
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Error asserts that a function returned an error (i.e. not `nil`).
|
||||||
|
//
|
||||||
|
// actualObj, err := SomeFunction()
|
||||||
|
// if assert.Error(t, err, "An error was expected") {
|
||||||
|
// assert.Equal(t, err, expectedError)
|
||||||
|
// }
|
||||||
|
//
|
||||||
|
// Returns whether the assertion was successful (true) or not (false).
|
||||||
|
func Error(t TestingT, err error, msgAndArgs ...interface{}) {
|
||||||
|
if !assert.Error(t, err, msgAndArgs...) {
|
||||||
|
t.FailNow()
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Exactly asserts that two objects are equal is value and type.
|
||||||
|
//
|
||||||
|
// assert.Exactly(t, int32(123), int64(123), "123 and 123 should NOT be equal")
|
||||||
|
//
|
||||||
|
// Returns whether the assertion was successful (true) or not (false).
|
||||||
|
func Exactly(t TestingT, expected interface{}, actual interface{}, msgAndArgs ...interface{}) {
|
||||||
|
if !assert.Exactly(t, expected, actual, msgAndArgs...) {
|
||||||
|
t.FailNow()
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Fail reports a failure through
|
||||||
|
func Fail(t TestingT, failureMessage string, msgAndArgs ...interface{}) {
|
||||||
|
if !assert.Fail(t, failureMessage, msgAndArgs...) {
|
||||||
|
t.FailNow()
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// FailNow fails test
|
||||||
|
func FailNow(t TestingT, failureMessage string, msgAndArgs ...interface{}) {
|
||||||
|
if !assert.FailNow(t, failureMessage, msgAndArgs...) {
|
||||||
|
t.FailNow()
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// False asserts that the specified value is false.
|
||||||
|
//
|
||||||
|
// assert.False(t, myBool, "myBool should be false")
|
||||||
|
//
|
||||||
|
// Returns whether the assertion was successful (true) or not (false).
|
||||||
|
func False(t TestingT, value bool, msgAndArgs ...interface{}) {
|
||||||
|
if !assert.False(t, value, msgAndArgs...) {
|
||||||
|
t.FailNow()
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// HTTPBodyContains asserts that a specified handler returns a
|
||||||
|
// body that contains a string.
|
||||||
|
//
|
||||||
|
// assert.HTTPBodyContains(t, myHandler, "www.google.com", nil, "I'm Feeling Lucky")
|
||||||
|
//
|
||||||
|
// Returns whether the assertion was successful (true) or not (false).
|
||||||
|
func HTTPBodyContains(t TestingT, handler http.HandlerFunc, method string, url string, values url.Values, str interface{}) {
|
||||||
|
if !assert.HTTPBodyContains(t, handler, method, url, values, str) {
|
||||||
|
t.FailNow()
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// HTTPBodyNotContains asserts that a specified handler returns a
|
||||||
|
// body that does not contain a string.
|
||||||
|
//
|
||||||
|
// assert.HTTPBodyNotContains(t, myHandler, "www.google.com", nil, "I'm Feeling Lucky")
|
||||||
|
//
|
||||||
|
// Returns whether the assertion was successful (true) or not (false).
|
||||||
|
func HTTPBodyNotContains(t TestingT, handler http.HandlerFunc, method string, url string, values url.Values, str interface{}) {
|
||||||
|
if !assert.HTTPBodyNotContains(t, handler, method, url, values, str) {
|
||||||
|
t.FailNow()
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// HTTPError asserts that a specified handler returns an error status code.
|
||||||
|
//
|
||||||
|
// assert.HTTPError(t, myHandler, "POST", "/a/b/c", url.Values{"a": []string{"b", "c"}}
|
||||||
|
//
|
||||||
|
// Returns whether the assertion was successful (true) or not (false).
|
||||||
|
func HTTPError(t TestingT, handler http.HandlerFunc, method string, url string, values url.Values) {
|
||||||
|
if !assert.HTTPError(t, handler, method, url, values) {
|
||||||
|
t.FailNow()
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// HTTPRedirect asserts that a specified handler returns a redirect status code.
|
||||||
|
//
|
||||||
|
// assert.HTTPRedirect(t, myHandler, "GET", "/a/b/c", url.Values{"a": []string{"b", "c"}}
|
||||||
|
//
|
||||||
|
// Returns whether the assertion was successful (true) or not (false).
|
||||||
|
func HTTPRedirect(t TestingT, handler http.HandlerFunc, method string, url string, values url.Values) {
|
||||||
|
if !assert.HTTPRedirect(t, handler, method, url, values) {
|
||||||
|
t.FailNow()
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// HTTPSuccess asserts that a specified handler returns a success status code.
|
||||||
|
//
|
||||||
|
// assert.HTTPSuccess(t, myHandler, "POST", "http://www.google.com", nil)
|
||||||
|
//
|
||||||
|
// Returns whether the assertion was successful (true) or not (false).
|
||||||
|
func HTTPSuccess(t TestingT, handler http.HandlerFunc, method string, url string, values url.Values) {
|
||||||
|
if !assert.HTTPSuccess(t, handler, method, url, values) {
|
||||||
|
t.FailNow()
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Implements asserts that an object is implemented by the specified interface.
|
||||||
|
//
|
||||||
|
// assert.Implements(t, (*MyInterface)(nil), new(MyObject), "MyObject")
|
||||||
|
func Implements(t TestingT, interfaceObject interface{}, object interface{}, msgAndArgs ...interface{}) {
|
||||||
|
if !assert.Implements(t, interfaceObject, object, msgAndArgs...) {
|
||||||
|
t.FailNow()
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// InDelta asserts that the two numerals are within delta of each other.
|
||||||
|
//
|
||||||
|
// assert.InDelta(t, math.Pi, (22 / 7.0), 0.01)
|
||||||
|
//
|
||||||
|
// Returns whether the assertion was successful (true) or not (false).
|
||||||
|
func InDelta(t TestingT, expected interface{}, actual interface{}, delta float64, msgAndArgs ...interface{}) {
|
||||||
|
if !assert.InDelta(t, expected, actual, delta, msgAndArgs...) {
|
||||||
|
t.FailNow()
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// InDeltaSlice is the same as InDelta, except it compares two slices.
|
||||||
|
func InDeltaSlice(t TestingT, expected interface{}, actual interface{}, delta float64, msgAndArgs ...interface{}) {
|
||||||
|
if !assert.InDeltaSlice(t, expected, actual, delta, msgAndArgs...) {
|
||||||
|
t.FailNow()
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// InEpsilon asserts that expected and actual have a relative error less than epsilon
|
||||||
|
//
|
||||||
|
// Returns whether the assertion was successful (true) or not (false).
|
||||||
|
func InEpsilon(t TestingT, expected interface{}, actual interface{}, epsilon float64, msgAndArgs ...interface{}) {
|
||||||
|
if !assert.InEpsilon(t, expected, actual, epsilon, msgAndArgs...) {
|
||||||
|
t.FailNow()
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// InEpsilonSlice is the same as InEpsilon, except it compares each value from two slices.
|
||||||
|
func InEpsilonSlice(t TestingT, expected interface{}, actual interface{}, epsilon float64, msgAndArgs ...interface{}) {
|
||||||
|
if !assert.InEpsilonSlice(t, expected, actual, epsilon, msgAndArgs...) {
|
||||||
|
t.FailNow()
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// IsType asserts that the specified objects are of the same type.
|
||||||
|
func IsType(t TestingT, expectedType interface{}, object interface{}, msgAndArgs ...interface{}) {
|
||||||
|
if !assert.IsType(t, expectedType, object, msgAndArgs...) {
|
||||||
|
t.FailNow()
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// JSONEq asserts that two JSON strings are equivalent.
|
||||||
|
//
|
||||||
|
// assert.JSONEq(t, `{"hello": "world", "foo": "bar"}`, `{"foo": "bar", "hello": "world"}`)
|
||||||
|
//
|
||||||
|
// Returns whether the assertion was successful (true) or not (false).
|
||||||
|
func JSONEq(t TestingT, expected string, actual string, msgAndArgs ...interface{}) {
|
||||||
|
if !assert.JSONEq(t, expected, actual, msgAndArgs...) {
|
||||||
|
t.FailNow()
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Len asserts that the specified object has specific length.
|
||||||
|
// Len also fails if the object has a type that len() not accept.
|
||||||
|
//
|
||||||
|
// assert.Len(t, mySlice, 3, "The size of slice is not 3")
|
||||||
|
//
|
||||||
|
// Returns whether the assertion was successful (true) or not (false).
|
||||||
|
func Len(t TestingT, object interface{}, length int, msgAndArgs ...interface{}) {
|
||||||
|
if !assert.Len(t, object, length, msgAndArgs...) {
|
||||||
|
t.FailNow()
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Nil asserts that the specified object is nil.
|
||||||
|
//
|
||||||
|
// assert.Nil(t, err, "err should be nothing")
|
||||||
|
//
|
||||||
|
// Returns whether the assertion was successful (true) or not (false).
|
||||||
|
func Nil(t TestingT, object interface{}, msgAndArgs ...interface{}) {
|
||||||
|
if !assert.Nil(t, object, msgAndArgs...) {
|
||||||
|
t.FailNow()
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// NoError asserts that a function returned no error (i.e. `nil`).
|
||||||
|
//
|
||||||
|
// actualObj, err := SomeFunction()
|
||||||
|
// if assert.NoError(t, err) {
|
||||||
|
// assert.Equal(t, actualObj, expectedObj)
|
||||||
|
// }
|
||||||
|
//
|
||||||
|
// Returns whether the assertion was successful (true) or not (false).
|
||||||
|
func NoError(t TestingT, err error, msgAndArgs ...interface{}) {
|
||||||
|
if !assert.NoError(t, err, msgAndArgs...) {
|
||||||
|
t.FailNow()
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// NotContains asserts that the specified string, list(array, slice...) or map does NOT contain the
|
||||||
|
// specified substring or element.
|
||||||
|
//
|
||||||
|
// assert.NotContains(t, "Hello World", "Earth", "But 'Hello World' does NOT contain 'Earth'")
|
||||||
|
// assert.NotContains(t, ["Hello", "World"], "Earth", "But ['Hello', 'World'] does NOT contain 'Earth'")
|
||||||
|
// assert.NotContains(t, {"Hello": "World"}, "Earth", "But {'Hello': 'World'} does NOT contain 'Earth'")
|
||||||
|
//
|
||||||
|
// Returns whether the assertion was successful (true) or not (false).
|
||||||
|
func NotContains(t TestingT, s interface{}, contains interface{}, msgAndArgs ...interface{}) {
|
||||||
|
if !assert.NotContains(t, s, contains, msgAndArgs...) {
|
||||||
|
t.FailNow()
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// NotEmpty asserts that the specified object is NOT empty. I.e. not nil, "", false, 0 or either
|
||||||
|
// a slice or a channel with len == 0.
|
||||||
|
//
|
||||||
|
// if assert.NotEmpty(t, obj) {
|
||||||
|
// assert.Equal(t, "two", obj[1])
|
||||||
|
// }
|
||||||
|
//
|
||||||
|
// Returns whether the assertion was successful (true) or not (false).
|
||||||
|
func NotEmpty(t TestingT, object interface{}, msgAndArgs ...interface{}) {
|
||||||
|
if !assert.NotEmpty(t, object, msgAndArgs...) {
|
||||||
|
t.FailNow()
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// NotEqual asserts that the specified values are NOT equal.
|
||||||
|
//
|
||||||
|
// assert.NotEqual(t, obj1, obj2, "two objects shouldn't be equal")
|
||||||
|
//
|
||||||
|
// Returns whether the assertion was successful (true) or not (false).
|
||||||
|
func NotEqual(t TestingT, expected interface{}, actual interface{}, msgAndArgs ...interface{}) {
|
||||||
|
if !assert.NotEqual(t, expected, actual, msgAndArgs...) {
|
||||||
|
t.FailNow()
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// NotNil asserts that the specified object is not nil.
|
||||||
|
//
|
||||||
|
// assert.NotNil(t, err, "err should be something")
|
||||||
|
//
|
||||||
|
// Returns whether the assertion was successful (true) or not (false).
|
||||||
|
func NotNil(t TestingT, object interface{}, msgAndArgs ...interface{}) {
|
||||||
|
if !assert.NotNil(t, object, msgAndArgs...) {
|
||||||
|
t.FailNow()
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// NotPanics asserts that the code inside the specified PanicTestFunc does NOT panic.
|
||||||
|
//
|
||||||
|
// assert.NotPanics(t, func(){
|
||||||
|
// RemainCalm()
|
||||||
|
// }, "Calling RemainCalm() should NOT panic")
|
||||||
|
//
|
||||||
|
// Returns whether the assertion was successful (true) or not (false).
|
||||||
|
func NotPanics(t TestingT, f assert.PanicTestFunc, msgAndArgs ...interface{}) {
|
||||||
|
if !assert.NotPanics(t, f, msgAndArgs...) {
|
||||||
|
t.FailNow()
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// NotRegexp asserts that a specified regexp does not match a string.
|
||||||
|
//
|
||||||
|
// assert.NotRegexp(t, regexp.MustCompile("starts"), "it's starting")
|
||||||
|
// assert.NotRegexp(t, "^start", "it's not starting")
|
||||||
|
//
|
||||||
|
// Returns whether the assertion was successful (true) or not (false).
|
||||||
|
func NotRegexp(t TestingT, rx interface{}, str interface{}, msgAndArgs ...interface{}) {
|
||||||
|
if !assert.NotRegexp(t, rx, str, msgAndArgs...) {
|
||||||
|
t.FailNow()
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// NotZero asserts that i is not the zero value for its type and returns the truth.
|
||||||
|
func NotZero(t TestingT, i interface{}, msgAndArgs ...interface{}) {
|
||||||
|
if !assert.NotZero(t, i, msgAndArgs...) {
|
||||||
|
t.FailNow()
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Panics asserts that the code inside the specified PanicTestFunc panics.
|
||||||
|
//
|
||||||
|
// assert.Panics(t, func(){
|
||||||
|
// GoCrazy()
|
||||||
|
// }, "Calling GoCrazy() should panic")
|
||||||
|
//
|
||||||
|
// Returns whether the assertion was successful (true) or not (false).
|
||||||
|
func Panics(t TestingT, f assert.PanicTestFunc, msgAndArgs ...interface{}) {
|
||||||
|
if !assert.Panics(t, f, msgAndArgs...) {
|
||||||
|
t.FailNow()
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Regexp asserts that a specified regexp matches a string.
|
||||||
|
//
|
||||||
|
// assert.Regexp(t, regexp.MustCompile("start"), "it's starting")
|
||||||
|
// assert.Regexp(t, "start...$", "it's not starting")
|
||||||
|
//
|
||||||
|
// Returns whether the assertion was successful (true) or not (false).
|
||||||
|
func Regexp(t TestingT, rx interface{}, str interface{}, msgAndArgs ...interface{}) {
|
||||||
|
if !assert.Regexp(t, rx, str, msgAndArgs...) {
|
||||||
|
t.FailNow()
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// True asserts that the specified value is true.
|
||||||
|
//
|
||||||
|
// assert.True(t, myBool, "myBool should be true")
|
||||||
|
//
|
||||||
|
// Returns whether the assertion was successful (true) or not (false).
|
||||||
|
func True(t TestingT, value bool, msgAndArgs ...interface{}) {
|
||||||
|
if !assert.True(t, value, msgAndArgs...) {
|
||||||
|
t.FailNow()
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// WithinDuration asserts that the two times are within duration delta of each other.
|
||||||
|
//
|
||||||
|
// assert.WithinDuration(t, time.Now(), time.Now(), 10*time.Second, "The difference should not be more than 10s")
|
||||||
|
//
|
||||||
|
// Returns whether the assertion was successful (true) or not (false).
|
||||||
|
func WithinDuration(t TestingT, expected time.Time, actual time.Time, delta time.Duration, msgAndArgs ...interface{}) {
|
||||||
|
if !assert.WithinDuration(t, expected, actual, delta, msgAndArgs...) {
|
||||||
|
t.FailNow()
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Zero asserts that i is the zero value for its type and returns the truth.
|
||||||
|
func Zero(t TestingT, i interface{}, msgAndArgs ...interface{}) {
|
||||||
|
if !assert.Zero(t, i, msgAndArgs...) {
|
||||||
|
t.FailNow()
|
||||||
|
}
|
||||||
|
}
|
6
vendor/github.com/stretchr/testify/require/require.go.tmpl
generated
vendored
Normal file
6
vendor/github.com/stretchr/testify/require/require.go.tmpl
generated
vendored
Normal file
@ -0,0 +1,6 @@
|
|||||||
|
{{.Comment}}
|
||||||
|
func {{.DocInfo.Name}}(t TestingT, {{.Params}}) {
|
||||||
|
if !assert.{{.DocInfo.Name}}(t, {{.ForwardedParams}}) {
|
||||||
|
t.FailNow()
|
||||||
|
}
|
||||||
|
}
|
347
vendor/github.com/stretchr/testify/require/require_forward.go
generated
vendored
Normal file
347
vendor/github.com/stretchr/testify/require/require_forward.go
generated
vendored
Normal file
@ -0,0 +1,347 @@
|
|||||||
|
/*
|
||||||
|
* CODE GENERATED AUTOMATICALLY WITH github.com/stretchr/testify/_codegen
|
||||||
|
* THIS FILE MUST NOT BE EDITED BY HAND
|
||||||
|
*/
|
||||||
|
|
||||||
|
package require
|
||||||
|
|
||||||
|
import (
|
||||||
|
assert "github.com/stretchr/testify/assert"
|
||||||
|
http "net/http"
|
||||||
|
url "net/url"
|
||||||
|
time "time"
|
||||||
|
)
|
||||||
|
|
||||||
|
// Condition uses a Comparison to assert a complex condition.
|
||||||
|
func (a *Assertions) Condition(comp assert.Comparison, msgAndArgs ...interface{}) {
|
||||||
|
Condition(a.t, comp, msgAndArgs...)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Contains asserts that the specified string, list(array, slice...) or map contains the
|
||||||
|
// specified substring or element.
|
||||||
|
//
|
||||||
|
// a.Contains("Hello World", "World", "But 'Hello World' does contain 'World'")
|
||||||
|
// a.Contains(["Hello", "World"], "World", "But ["Hello", "World"] does contain 'World'")
|
||||||
|
// a.Contains({"Hello": "World"}, "Hello", "But {'Hello': 'World'} does contain 'Hello'")
|
||||||
|
//
|
||||||
|
// Returns whether the assertion was successful (true) or not (false).
|
||||||
|
func (a *Assertions) Contains(s interface{}, contains interface{}, msgAndArgs ...interface{}) {
|
||||||
|
Contains(a.t, s, contains, msgAndArgs...)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Empty asserts that the specified object is empty. I.e. nil, "", false, 0 or either
|
||||||
|
// a slice or a channel with len == 0.
|
||||||
|
//
|
||||||
|
// a.Empty(obj)
|
||||||
|
//
|
||||||
|
// Returns whether the assertion was successful (true) or not (false).
|
||||||
|
func (a *Assertions) Empty(object interface{}, msgAndArgs ...interface{}) {
|
||||||
|
Empty(a.t, object, msgAndArgs...)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Equal asserts that two objects are equal.
|
||||||
|
//
|
||||||
|
// a.Equal(123, 123, "123 and 123 should be equal")
|
||||||
|
//
|
||||||
|
// Returns whether the assertion was successful (true) or not (false).
|
||||||
|
func (a *Assertions) Equal(expected interface{}, actual interface{}, msgAndArgs ...interface{}) {
|
||||||
|
Equal(a.t, expected, actual, msgAndArgs...)
|
||||||
|
}
|
||||||
|
|
||||||
|
// EqualError asserts that a function returned an error (i.e. not `nil`)
|
||||||
|
// and that it is equal to the provided error.
|
||||||
|
//
|
||||||
|
// actualObj, err := SomeFunction()
|
||||||
|
// a.EqualError(err, expectedErrorString, "An error was expected")
|
||||||
|
//
|
||||||
|
// Returns whether the assertion was successful (true) or not (false).
|
||||||
|
func (a *Assertions) EqualError(theError error, errString string, msgAndArgs ...interface{}) {
|
||||||
|
EqualError(a.t, theError, errString, msgAndArgs...)
|
||||||
|
}
|
||||||
|
|
||||||
|
// EqualValues asserts that two objects are equal or convertable to the same types
|
||||||
|
// and equal.
|
||||||
|
//
|
||||||
|
// a.EqualValues(uint32(123), int32(123), "123 and 123 should be equal")
|
||||||
|
//
|
||||||
|
// Returns whether the assertion was successful (true) or not (false).
|
||||||
|
func (a *Assertions) EqualValues(expected interface{}, actual interface{}, msgAndArgs ...interface{}) {
|
||||||
|
EqualValues(a.t, expected, actual, msgAndArgs...)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Error asserts that a function returned an error (i.e. not `nil`).
|
||||||
|
//
|
||||||
|
// actualObj, err := SomeFunction()
|
||||||
|
// if a.Error(err, "An error was expected") {
|
||||||
|
// assert.Equal(t, err, expectedError)
|
||||||
|
// }
|
||||||
|
//
|
||||||
|
// Returns whether the assertion was successful (true) or not (false).
|
||||||
|
func (a *Assertions) Error(err error, msgAndArgs ...interface{}) {
|
||||||
|
Error(a.t, err, msgAndArgs...)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Exactly asserts that two objects are equal is value and type.
|
||||||
|
//
|
||||||
|
// a.Exactly(int32(123), int64(123), "123 and 123 should NOT be equal")
|
||||||
|
//
|
||||||
|
// Returns whether the assertion was successful (true) or not (false).
|
||||||
|
func (a *Assertions) Exactly(expected interface{}, actual interface{}, msgAndArgs ...interface{}) {
|
||||||
|
Exactly(a.t, expected, actual, msgAndArgs...)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Fail reports a failure through
|
||||||
|
func (a *Assertions) Fail(failureMessage string, msgAndArgs ...interface{}) {
|
||||||
|
Fail(a.t, failureMessage, msgAndArgs...)
|
||||||
|
}
|
||||||
|
|
||||||
|
// FailNow fails test
|
||||||
|
func (a *Assertions) FailNow(failureMessage string, msgAndArgs ...interface{}) {
|
||||||
|
FailNow(a.t, failureMessage, msgAndArgs...)
|
||||||
|
}
|
||||||
|
|
||||||
|
// False asserts that the specified value is false.
|
||||||
|
//
|
||||||
|
// a.False(myBool, "myBool should be false")
|
||||||
|
//
|
||||||
|
// Returns whether the assertion was successful (true) or not (false).
|
||||||
|
func (a *Assertions) False(value bool, msgAndArgs ...interface{}) {
|
||||||
|
False(a.t, value, msgAndArgs...)
|
||||||
|
}
|
||||||
|
|
||||||
|
// HTTPBodyContains asserts that a specified handler returns a
|
||||||
|
// body that contains a string.
|
||||||
|
//
|
||||||
|
// a.HTTPBodyContains(myHandler, "www.google.com", nil, "I'm Feeling Lucky")
|
||||||
|
//
|
||||||
|
// Returns whether the assertion was successful (true) or not (false).
|
||||||
|
func (a *Assertions) HTTPBodyContains(handler http.HandlerFunc, method string, url string, values url.Values, str interface{}) {
|
||||||
|
HTTPBodyContains(a.t, handler, method, url, values, str)
|
||||||
|
}
|
||||||
|
|
||||||
|
// HTTPBodyNotContains asserts that a specified handler returns a
|
||||||
|
// body that does not contain a string.
|
||||||
|
//
|
||||||
|
// a.HTTPBodyNotContains(myHandler, "www.google.com", nil, "I'm Feeling Lucky")
|
||||||
|
//
|
||||||
|
// Returns whether the assertion was successful (true) or not (false).
|
||||||
|
func (a *Assertions) HTTPBodyNotContains(handler http.HandlerFunc, method string, url string, values url.Values, str interface{}) {
|
||||||
|
HTTPBodyNotContains(a.t, handler, method, url, values, str)
|
||||||
|
}
|
||||||
|
|
||||||
|
// HTTPError asserts that a specified handler returns an error status code.
|
||||||
|
//
|
||||||
|
// a.HTTPError(myHandler, "POST", "/a/b/c", url.Values{"a": []string{"b", "c"}}
|
||||||
|
//
|
||||||
|
// Returns whether the assertion was successful (true) or not (false).
|
||||||
|
func (a *Assertions) HTTPError(handler http.HandlerFunc, method string, url string, values url.Values) {
|
||||||
|
HTTPError(a.t, handler, method, url, values)
|
||||||
|
}
|
||||||
|
|
||||||
|
// HTTPRedirect asserts that a specified handler returns a redirect status code.
|
||||||
|
//
|
||||||
|
// a.HTTPRedirect(myHandler, "GET", "/a/b/c", url.Values{"a": []string{"b", "c"}}
|
||||||
|
//
|
||||||
|
// Returns whether the assertion was successful (true) or not (false).
|
||||||
|
func (a *Assertions) HTTPRedirect(handler http.HandlerFunc, method string, url string, values url.Values) {
|
||||||
|
HTTPRedirect(a.t, handler, method, url, values)
|
||||||
|
}
|
||||||
|
|
||||||
|
// HTTPSuccess asserts that a specified handler returns a success status code.
|
||||||
|
//
|
||||||
|
// a.HTTPSuccess(myHandler, "POST", "http://www.google.com", nil)
|
||||||
|
//
|
||||||
|
// Returns whether the assertion was successful (true) or not (false).
|
||||||
|
func (a *Assertions) HTTPSuccess(handler http.HandlerFunc, method string, url string, values url.Values) {
|
||||||
|
HTTPSuccess(a.t, handler, method, url, values)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Implements asserts that an object is implemented by the specified interface.
|
||||||
|
//
|
||||||
|
// a.Implements((*MyInterface)(nil), new(MyObject), "MyObject")
|
||||||
|
func (a *Assertions) Implements(interfaceObject interface{}, object interface{}, msgAndArgs ...interface{}) {
|
||||||
|
Implements(a.t, interfaceObject, object, msgAndArgs...)
|
||||||
|
}
|
||||||
|
|
||||||
|
// InDelta asserts that the two numerals are within delta of each other.
|
||||||
|
//
|
||||||
|
// a.InDelta(math.Pi, (22 / 7.0), 0.01)
|
||||||
|
//
|
||||||
|
// Returns whether the assertion was successful (true) or not (false).
|
||||||
|
func (a *Assertions) InDelta(expected interface{}, actual interface{}, delta float64, msgAndArgs ...interface{}) {
|
||||||
|
InDelta(a.t, expected, actual, delta, msgAndArgs...)
|
||||||
|
}
|
||||||
|
|
||||||
|
// InDeltaSlice is the same as InDelta, except it compares two slices.
|
||||||
|
func (a *Assertions) InDeltaSlice(expected interface{}, actual interface{}, delta float64, msgAndArgs ...interface{}) {
|
||||||
|
InDeltaSlice(a.t, expected, actual, delta, msgAndArgs...)
|
||||||
|
}
|
||||||
|
|
||||||
|
// InEpsilon asserts that expected and actual have a relative error less than epsilon
|
||||||
|
//
|
||||||
|
// Returns whether the assertion was successful (true) or not (false).
|
||||||
|
func (a *Assertions) InEpsilon(expected interface{}, actual interface{}, epsilon float64, msgAndArgs ...interface{}) {
|
||||||
|
InEpsilon(a.t, expected, actual, epsilon, msgAndArgs...)
|
||||||
|
}
|
||||||
|
|
||||||
|
// InEpsilonSlice is the same as InEpsilon, except it compares each value from two slices.
|
||||||
|
func (a *Assertions) InEpsilonSlice(expected interface{}, actual interface{}, epsilon float64, msgAndArgs ...interface{}) {
|
||||||
|
InEpsilonSlice(a.t, expected, actual, epsilon, msgAndArgs...)
|
||||||
|
}
|
||||||
|
|
||||||
|
// IsType asserts that the specified objects are of the same type.
|
||||||
|
func (a *Assertions) IsType(expectedType interface{}, object interface{}, msgAndArgs ...interface{}) {
|
||||||
|
IsType(a.t, expectedType, object, msgAndArgs...)
|
||||||
|
}
|
||||||
|
|
||||||
|
// JSONEq asserts that two JSON strings are equivalent.
|
||||||
|
//
|
||||||
|
// a.JSONEq(`{"hello": "world", "foo": "bar"}`, `{"foo": "bar", "hello": "world"}`)
|
||||||
|
//
|
||||||
|
// Returns whether the assertion was successful (true) or not (false).
|
||||||
|
func (a *Assertions) JSONEq(expected string, actual string, msgAndArgs ...interface{}) {
|
||||||
|
JSONEq(a.t, expected, actual, msgAndArgs...)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Len asserts that the specified object has specific length.
|
||||||
|
// Len also fails if the object has a type that len() not accept.
|
||||||
|
//
|
||||||
|
// a.Len(mySlice, 3, "The size of slice is not 3")
|
||||||
|
//
|
||||||
|
// Returns whether the assertion was successful (true) or not (false).
|
||||||
|
func (a *Assertions) Len(object interface{}, length int, msgAndArgs ...interface{}) {
|
||||||
|
Len(a.t, object, length, msgAndArgs...)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Nil asserts that the specified object is nil.
|
||||||
|
//
|
||||||
|
// a.Nil(err, "err should be nothing")
|
||||||
|
//
|
||||||
|
// Returns whether the assertion was successful (true) or not (false).
|
||||||
|
func (a *Assertions) Nil(object interface{}, msgAndArgs ...interface{}) {
|
||||||
|
Nil(a.t, object, msgAndArgs...)
|
||||||
|
}
|
||||||
|
|
||||||
|
// NoError asserts that a function returned no error (i.e. `nil`).
|
||||||
|
//
|
||||||
|
// actualObj, err := SomeFunction()
|
||||||
|
// if a.NoError(err) {
|
||||||
|
// assert.Equal(t, actualObj, expectedObj)
|
||||||
|
// }
|
||||||
|
//
|
||||||
|
// Returns whether the assertion was successful (true) or not (false).
|
||||||
|
func (a *Assertions) NoError(err error, msgAndArgs ...interface{}) {
|
||||||
|
NoError(a.t, err, msgAndArgs...)
|
||||||
|
}
|
||||||
|
|
||||||
|
// NotContains asserts that the specified string, list(array, slice...) or map does NOT contain the
|
||||||
|
// specified substring or element.
|
||||||
|
//
|
||||||
|
// a.NotContains("Hello World", "Earth", "But 'Hello World' does NOT contain 'Earth'")
|
||||||
|
// a.NotContains(["Hello", "World"], "Earth", "But ['Hello', 'World'] does NOT contain 'Earth'")
|
||||||
|
// a.NotContains({"Hello": "World"}, "Earth", "But {'Hello': 'World'} does NOT contain 'Earth'")
|
||||||
|
//
|
||||||
|
// Returns whether the assertion was successful (true) or not (false).
|
||||||
|
func (a *Assertions) NotContains(s interface{}, contains interface{}, msgAndArgs ...interface{}) {
|
||||||
|
NotContains(a.t, s, contains, msgAndArgs...)
|
||||||
|
}
|
||||||
|
|
||||||
|
// NotEmpty asserts that the specified object is NOT empty. I.e. not nil, "", false, 0 or either
|
||||||
|
// a slice or a channel with len == 0.
|
||||||
|
//
|
||||||
|
// if a.NotEmpty(obj) {
|
||||||
|
// assert.Equal(t, "two", obj[1])
|
||||||
|
// }
|
||||||
|
//
|
||||||
|
// Returns whether the assertion was successful (true) or not (false).
|
||||||
|
func (a *Assertions) NotEmpty(object interface{}, msgAndArgs ...interface{}) {
|
||||||
|
NotEmpty(a.t, object, msgAndArgs...)
|
||||||
|
}
|
||||||
|
|
||||||
|
// NotEqual asserts that the specified values are NOT equal.
|
||||||
|
//
|
||||||
|
// a.NotEqual(obj1, obj2, "two objects shouldn't be equal")
|
||||||
|
//
|
||||||
|
// Returns whether the assertion was successful (true) or not (false).
|
||||||
|
func (a *Assertions) NotEqual(expected interface{}, actual interface{}, msgAndArgs ...interface{}) {
|
||||||
|
NotEqual(a.t, expected, actual, msgAndArgs...)
|
||||||
|
}
|
||||||
|
|
||||||
|
// NotNil asserts that the specified object is not nil.
|
||||||
|
//
|
||||||
|
// a.NotNil(err, "err should be something")
|
||||||
|
//
|
||||||
|
// Returns whether the assertion was successful (true) or not (false).
|
||||||
|
func (a *Assertions) NotNil(object interface{}, msgAndArgs ...interface{}) {
|
||||||
|
NotNil(a.t, object, msgAndArgs...)
|
||||||
|
}
|
||||||
|
|
||||||
|
// NotPanics asserts that the code inside the specified PanicTestFunc does NOT panic.
|
||||||
|
//
|
||||||
|
// a.NotPanics(func(){
|
||||||
|
// RemainCalm()
|
||||||
|
// }, "Calling RemainCalm() should NOT panic")
|
||||||
|
//
|
||||||
|
// Returns whether the assertion was successful (true) or not (false).
|
||||||
|
func (a *Assertions) NotPanics(f assert.PanicTestFunc, msgAndArgs ...interface{}) {
|
||||||
|
NotPanics(a.t, f, msgAndArgs...)
|
||||||
|
}
|
||||||
|
|
||||||
|
// NotRegexp asserts that a specified regexp does not match a string.
|
||||||
|
//
|
||||||
|
// a.NotRegexp(regexp.MustCompile("starts"), "it's starting")
|
||||||
|
// a.NotRegexp("^start", "it's not starting")
|
||||||
|
//
|
||||||
|
// Returns whether the assertion was successful (true) or not (false).
|
||||||
|
func (a *Assertions) NotRegexp(rx interface{}, str interface{}, msgAndArgs ...interface{}) {
|
||||||
|
NotRegexp(a.t, rx, str, msgAndArgs...)
|
||||||
|
}
|
||||||
|
|
||||||
|
// NotZero asserts that i is not the zero value for its type and returns the truth.
|
||||||
|
func (a *Assertions) NotZero(i interface{}, msgAndArgs ...interface{}) {
|
||||||
|
NotZero(a.t, i, msgAndArgs...)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Panics asserts that the code inside the specified PanicTestFunc panics.
|
||||||
|
//
|
||||||
|
// a.Panics(func(){
|
||||||
|
// GoCrazy()
|
||||||
|
// }, "Calling GoCrazy() should panic")
|
||||||
|
//
|
||||||
|
// Returns whether the assertion was successful (true) or not (false).
|
||||||
|
func (a *Assertions) Panics(f assert.PanicTestFunc, msgAndArgs ...interface{}) {
|
||||||
|
Panics(a.t, f, msgAndArgs...)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Regexp asserts that a specified regexp matches a string.
|
||||||
|
//
|
||||||
|
// a.Regexp(regexp.MustCompile("start"), "it's starting")
|
||||||
|
// a.Regexp("start...$", "it's not starting")
|
||||||
|
//
|
||||||
|
// Returns whether the assertion was successful (true) or not (false).
|
||||||
|
func (a *Assertions) Regexp(rx interface{}, str interface{}, msgAndArgs ...interface{}) {
|
||||||
|
Regexp(a.t, rx, str, msgAndArgs...)
|
||||||
|
}
|
||||||
|
|
||||||
|
// True asserts that the specified value is true.
|
||||||
|
//
|
||||||
|
// a.True(myBool, "myBool should be true")
|
||||||
|
//
|
||||||
|
// Returns whether the assertion was successful (true) or not (false).
|
||||||
|
func (a *Assertions) True(value bool, msgAndArgs ...interface{}) {
|
||||||
|
True(a.t, value, msgAndArgs...)
|
||||||
|
}
|
||||||
|
|
||||||
|
// WithinDuration asserts that the two times are within duration delta of each other.
|
||||||
|
//
|
||||||
|
// a.WithinDuration(time.Now(), time.Now(), 10*time.Second, "The difference should not be more than 10s")
|
||||||
|
//
|
||||||
|
// Returns whether the assertion was successful (true) or not (false).
|
||||||
|
func (a *Assertions) WithinDuration(expected time.Time, actual time.Time, delta time.Duration, msgAndArgs ...interface{}) {
|
||||||
|
WithinDuration(a.t, expected, actual, delta, msgAndArgs...)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Zero asserts that i is the zero value for its type and returns the truth.
|
||||||
|
func (a *Assertions) Zero(i interface{}, msgAndArgs ...interface{}) {
|
||||||
|
Zero(a.t, i, msgAndArgs...)
|
||||||
|
}
|
4  vendor/github.com/stretchr/testify/require/require_forward.go.tmpl  (generated, vendored, Normal file)
@@ -0,0 +1,4 @@
{{.CommentWithoutT "a"}}
func (a *Assertions) {{.DocInfo.Name}}({{.Params}}) {
	{{.DocInfo.Name}}(a.t, {{.ForwardedParams}})
}
9  vendor/github.com/stretchr/testify/require/requirements.go  (generated, vendored, Normal file)
@@ -0,0 +1,9 @@
package require

// TestingT is an interface wrapper around *testing.T
type TestingT interface {
	Errorf(format string, args ...interface{})
	FailNow()
}

//go:generate go run ../_codegen/main.go -output-package=require -template=require.go.tmpl
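The TestingT interface above is the only hook the generated assertions need, so anything that can report an error and abort satisfies it, not just *testing.T. A hypothetical sketch (names are illustrative, not from this PR):

package main

import (
	"fmt"
	"os"

	"github.com/stretchr/testify/require"
)

// cliReporter satisfies require.TestingT: Errorf logs, FailNow aborts.
type cliReporter struct{}

func (cliReporter) Errorf(format string, args ...interface{}) {
	fmt.Fprintf(os.Stderr, format+"\n", args...)
}

func (cliReporter) FailNow() {
	os.Exit(1)
}

func main() {
	require.NotEmpty(cliReporter{}, os.Args, "expected at least the program name")
}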
24  vendor/github.com/syndtr/gocapability/LICENSE  (generated, vendored, Normal file)
@@ -0,0 +1,24 @@
Copyright 2013 Suryandaru Triandana <syndtr@gmail.com>
All rights reserved.

Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:

    * Redistributions of source code must retain the above copyright
      notice, this list of conditions and the following disclaimer.
    * Redistributions in binary form must reproduce the above copyright
      notice, this list of conditions and the following disclaimer in the
      documentation and/or other materials provided with the distribution.

THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
72  vendor/github.com/syndtr/gocapability/capability/capability.go  (generated, vendored, Normal file)
@@ -0,0 +1,72 @@
// Copyright (c) 2013, Suryandaru Triandana <syndtr@gmail.com>
// All rights reserved.
//
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

// Package capability provides utilities for manipulating POSIX capabilities.
package capability

type Capabilities interface {
	// Get check whether a capability present in the given
	// capabilities set. The 'which' value should be one of EFFECTIVE,
	// PERMITTED, INHERITABLE, BOUNDING or AMBIENT.
	Get(which CapType, what Cap) bool

	// Empty check whether all capability bits of the given capabilities
	// set are zero. The 'which' value should be one of EFFECTIVE,
	// PERMITTED, INHERITABLE, BOUNDING or AMBIENT.
	Empty(which CapType) bool

	// Full check whether all capability bits of the given capabilities
	// set are one. The 'which' value should be one of EFFECTIVE,
	// PERMITTED, INHERITABLE, BOUNDING or AMBIENT.
	Full(which CapType) bool

	// Set sets capabilities of the given capabilities sets. The
	// 'which' value should be one or combination (OR'ed) of EFFECTIVE,
	// PERMITTED, INHERITABLE, BOUNDING or AMBIENT.
	Set(which CapType, caps ...Cap)

	// Unset unsets capabilities of the given capabilities sets. The
	// 'which' value should be one or combination (OR'ed) of EFFECTIVE,
	// PERMITTED, INHERITABLE, BOUNDING or AMBIENT.
	Unset(which CapType, caps ...Cap)

	// Fill sets all bits of the given capabilities kind to one. The
	// 'kind' value should be one or combination (OR'ed) of CAPS,
	// BOUNDS or AMBS.
	Fill(kind CapType)

	// Clear sets all bits of the given capabilities kind to zero. The
	// 'kind' value should be one or combination (OR'ed) of CAPS,
	// BOUNDS or AMBS.
	Clear(kind CapType)

	// String return current capabilities state of the given capabilities
	// set as string. The 'which' value should be one of EFFECTIVE,
	// PERMITTED, INHERITABLE BOUNDING or AMBIENT
	StringCap(which CapType) string

	// String return current capabilities state as string.
	String() string

	// Load load actual capabilities value. This will overwrite all
	// outstanding changes.
	Load() error

	// Apply apply the capabilities settings, so all changes will take
	// effect.
	Apply(kind CapType) error
}

// NewPid create new initialized Capabilities object for given pid when it
// is nonzero, or for the current pid if pid is 0
func NewPid(pid int) (Capabilities, error) {
	return newPid(pid)
}

// NewFile create new initialized Capabilities object for given named file.
func NewFile(name string) (Capabilities, error) {
	return newFile(name)
}
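A minimal usage sketch of the interface above on a Linux host (illustrative, not from this PR); the CapType and Cap constants it refers to are defined in enum.go further down:

package main

import (
	"fmt"
	"log"

	"github.com/syndtr/gocapability/capability"
)

func main() {
	// pid 0 means the current process; NewPid loads its capability sets.
	caps, err := capability.NewPid(0)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("effective NET_ADMIN:", caps.Get(capability.EFFECTIVE, capability.CAP_NET_ADMIN))

	// Drop NET_RAW from the bounding set; applying BOUNDS requires CAP_SETPCAP.
	caps.Unset(capability.BOUNDING, capability.CAP_NET_RAW)
	if err := caps.Apply(capability.BOUNDS); err != nil {
		log.Fatal(err)
	}
}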
650  vendor/github.com/syndtr/gocapability/capability/capability_linux.go  (generated, vendored, Normal file)
@@ -0,0 +1,650 @@
|
|||||||
|
// Copyright (c) 2013, Suryandaru Triandana <syndtr@gmail.com>
|
||||||
|
// All rights reserved.
|
||||||
|
//
|
||||||
|
// Use of this source code is governed by a BSD-style license that can be
|
||||||
|
// found in the LICENSE file.
|
||||||
|
|
||||||
|
package capability
|
||||||
|
|
||||||
|
import (
|
||||||
|
"bufio"
|
||||||
|
"errors"
|
||||||
|
"fmt"
|
||||||
|
"io"
|
||||||
|
"os"
|
||||||
|
"strings"
|
||||||
|
"syscall"
|
||||||
|
)
|
||||||
|
|
||||||
|
var errUnknownVers = errors.New("unknown capability version")
|
||||||
|
|
||||||
|
const (
|
||||||
|
linuxCapVer1 = 0x19980330
|
||||||
|
linuxCapVer2 = 0x20071026
|
||||||
|
linuxCapVer3 = 0x20080522
|
||||||
|
)
|
||||||
|
|
||||||
|
var (
|
||||||
|
capVers uint32
|
||||||
|
capLastCap Cap
|
||||||
|
)
|
||||||
|
|
||||||
|
func init() {
|
||||||
|
var hdr capHeader
|
||||||
|
capget(&hdr, nil)
|
||||||
|
capVers = hdr.version
|
||||||
|
|
||||||
|
if initLastCap() == nil {
|
||||||
|
CAP_LAST_CAP = capLastCap
|
||||||
|
if capLastCap > 31 {
|
||||||
|
capUpperMask = (uint32(1) << (uint(capLastCap) - 31)) - 1
|
||||||
|
} else {
|
||||||
|
capUpperMask = 0
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func initLastCap() error {
|
||||||
|
if capLastCap != 0 {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
f, err := os.Open("/proc/sys/kernel/cap_last_cap")
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
defer f.Close()
|
||||||
|
|
||||||
|
var b []byte = make([]byte, 11)
|
||||||
|
_, err = f.Read(b)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
fmt.Sscanf(string(b), "%d", &capLastCap)
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func mkStringCap(c Capabilities, which CapType) (ret string) {
|
||||||
|
for i, first := Cap(0), true; i <= CAP_LAST_CAP; i++ {
|
||||||
|
if !c.Get(which, i) {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
if first {
|
||||||
|
first = false
|
||||||
|
} else {
|
||||||
|
ret += ", "
|
||||||
|
}
|
||||||
|
ret += i.String()
|
||||||
|
}
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
func mkString(c Capabilities, max CapType) (ret string) {
|
||||||
|
ret = "{"
|
||||||
|
for i := CapType(1); i <= max; i <<= 1 {
|
||||||
|
ret += " " + i.String() + "=\""
|
||||||
|
if c.Empty(i) {
|
||||||
|
ret += "empty"
|
||||||
|
} else if c.Full(i) {
|
||||||
|
ret += "full"
|
||||||
|
} else {
|
||||||
|
ret += c.StringCap(i)
|
||||||
|
}
|
||||||
|
ret += "\""
|
||||||
|
}
|
||||||
|
ret += " }"
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
func newPid(pid int) (c Capabilities, err error) {
|
||||||
|
switch capVers {
|
||||||
|
case linuxCapVer1:
|
||||||
|
p := new(capsV1)
|
||||||
|
p.hdr.version = capVers
|
||||||
|
p.hdr.pid = pid
|
||||||
|
c = p
|
||||||
|
case linuxCapVer2, linuxCapVer3:
|
||||||
|
p := new(capsV3)
|
||||||
|
p.hdr.version = capVers
|
||||||
|
p.hdr.pid = pid
|
||||||
|
c = p
|
||||||
|
default:
|
||||||
|
err = errUnknownVers
|
||||||
|
return
|
||||||
|
}
|
||||||
|
err = c.Load()
|
||||||
|
if err != nil {
|
||||||
|
c = nil
|
||||||
|
}
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
type capsV1 struct {
|
||||||
|
hdr capHeader
|
||||||
|
data capData
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *capsV1) Get(which CapType, what Cap) bool {
|
||||||
|
if what > 32 {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
|
||||||
|
switch which {
|
||||||
|
case EFFECTIVE:
|
||||||
|
return (1<<uint(what))&c.data.effective != 0
|
||||||
|
case PERMITTED:
|
||||||
|
return (1<<uint(what))&c.data.permitted != 0
|
||||||
|
case INHERITABLE:
|
||||||
|
return (1<<uint(what))&c.data.inheritable != 0
|
||||||
|
}
|
||||||
|
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *capsV1) getData(which CapType) (ret uint32) {
|
||||||
|
switch which {
|
||||||
|
case EFFECTIVE:
|
||||||
|
ret = c.data.effective
|
||||||
|
case PERMITTED:
|
||||||
|
ret = c.data.permitted
|
||||||
|
case INHERITABLE:
|
||||||
|
ret = c.data.inheritable
|
||||||
|
}
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *capsV1) Empty(which CapType) bool {
|
||||||
|
return c.getData(which) == 0
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *capsV1) Full(which CapType) bool {
|
||||||
|
return (c.getData(which) & 0x7fffffff) == 0x7fffffff
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *capsV1) Set(which CapType, caps ...Cap) {
|
||||||
|
for _, what := range caps {
|
||||||
|
if what > 32 {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
if which&EFFECTIVE != 0 {
|
||||||
|
c.data.effective |= 1 << uint(what)
|
||||||
|
}
|
||||||
|
if which&PERMITTED != 0 {
|
||||||
|
c.data.permitted |= 1 << uint(what)
|
||||||
|
}
|
||||||
|
if which&INHERITABLE != 0 {
|
||||||
|
c.data.inheritable |= 1 << uint(what)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *capsV1) Unset(which CapType, caps ...Cap) {
|
||||||
|
for _, what := range caps {
|
||||||
|
if what > 32 {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
if which&EFFECTIVE != 0 {
|
||||||
|
c.data.effective &= ^(1 << uint(what))
|
||||||
|
}
|
||||||
|
if which&PERMITTED != 0 {
|
||||||
|
c.data.permitted &= ^(1 << uint(what))
|
||||||
|
}
|
||||||
|
if which&INHERITABLE != 0 {
|
||||||
|
c.data.inheritable &= ^(1 << uint(what))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *capsV1) Fill(kind CapType) {
|
||||||
|
if kind&CAPS == CAPS {
|
||||||
|
c.data.effective = 0x7fffffff
|
||||||
|
c.data.permitted = 0x7fffffff
|
||||||
|
c.data.inheritable = 0
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *capsV1) Clear(kind CapType) {
|
||||||
|
if kind&CAPS == CAPS {
|
||||||
|
c.data.effective = 0
|
||||||
|
c.data.permitted = 0
|
||||||
|
c.data.inheritable = 0
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *capsV1) StringCap(which CapType) (ret string) {
|
||||||
|
return mkStringCap(c, which)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *capsV1) String() (ret string) {
|
||||||
|
return mkString(c, BOUNDING)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *capsV1) Load() (err error) {
|
||||||
|
return capget(&c.hdr, &c.data)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *capsV1) Apply(kind CapType) error {
|
||||||
|
if kind&CAPS == CAPS {
|
||||||
|
return capset(&c.hdr, &c.data)
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
type capsV3 struct {
|
||||||
|
hdr capHeader
|
||||||
|
data [2]capData
|
||||||
|
bounds [2]uint32
|
||||||
|
ambient [2]uint32
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *capsV3) Get(which CapType, what Cap) bool {
|
||||||
|
var i uint
|
||||||
|
if what > 31 {
|
||||||
|
i = uint(what) >> 5
|
||||||
|
what %= 32
|
||||||
|
}
|
||||||
|
|
||||||
|
switch which {
|
||||||
|
case EFFECTIVE:
|
||||||
|
return (1<<uint(what))&c.data[i].effective != 0
|
||||||
|
case PERMITTED:
|
||||||
|
return (1<<uint(what))&c.data[i].permitted != 0
|
||||||
|
case INHERITABLE:
|
||||||
|
return (1<<uint(what))&c.data[i].inheritable != 0
|
||||||
|
case BOUNDING:
|
||||||
|
return (1<<uint(what))&c.bounds[i] != 0
|
||||||
|
case AMBIENT:
|
||||||
|
return (1<<uint(what))&c.ambient[i] != 0
|
||||||
|
}
|
||||||
|
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *capsV3) getData(which CapType, dest []uint32) {
|
||||||
|
switch which {
|
||||||
|
case EFFECTIVE:
|
||||||
|
dest[0] = c.data[0].effective
|
||||||
|
dest[1] = c.data[1].effective
|
||||||
|
case PERMITTED:
|
||||||
|
dest[0] = c.data[0].permitted
|
||||||
|
dest[1] = c.data[1].permitted
|
||||||
|
case INHERITABLE:
|
||||||
|
dest[0] = c.data[0].inheritable
|
||||||
|
dest[1] = c.data[1].inheritable
|
||||||
|
case BOUNDING:
|
||||||
|
dest[0] = c.bounds[0]
|
||||||
|
dest[1] = c.bounds[1]
|
||||||
|
case AMBIENT:
|
||||||
|
dest[0] = c.ambient[0]
|
||||||
|
dest[1] = c.ambient[1]
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *capsV3) Empty(which CapType) bool {
|
||||||
|
var data [2]uint32
|
||||||
|
c.getData(which, data[:])
|
||||||
|
return data[0] == 0 && data[1] == 0
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *capsV3) Full(which CapType) bool {
|
||||||
|
var data [2]uint32
|
||||||
|
c.getData(which, data[:])
|
||||||
|
if (data[0] & 0xffffffff) != 0xffffffff {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
return (data[1] & capUpperMask) == capUpperMask
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *capsV3) Set(which CapType, caps ...Cap) {
|
||||||
|
for _, what := range caps {
|
||||||
|
var i uint
|
||||||
|
if what > 31 {
|
||||||
|
i = uint(what) >> 5
|
||||||
|
what %= 32
|
||||||
|
}
|
||||||
|
|
||||||
|
if which&EFFECTIVE != 0 {
|
||||||
|
c.data[i].effective |= 1 << uint(what)
|
||||||
|
}
|
||||||
|
if which&PERMITTED != 0 {
|
||||||
|
c.data[i].permitted |= 1 << uint(what)
|
||||||
|
}
|
||||||
|
if which&INHERITABLE != 0 {
|
||||||
|
c.data[i].inheritable |= 1 << uint(what)
|
||||||
|
}
|
||||||
|
if which&BOUNDING != 0 {
|
||||||
|
c.bounds[i] |= 1 << uint(what)
|
||||||
|
}
|
||||||
|
if which&AMBIENT != 0 {
|
||||||
|
c.ambient[i] |= 1 << uint(what)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *capsV3) Unset(which CapType, caps ...Cap) {
|
||||||
|
for _, what := range caps {
|
||||||
|
var i uint
|
||||||
|
if what > 31 {
|
||||||
|
i = uint(what) >> 5
|
||||||
|
what %= 32
|
||||||
|
}
|
||||||
|
|
||||||
|
if which&EFFECTIVE != 0 {
|
||||||
|
c.data[i].effective &= ^(1 << uint(what))
|
||||||
|
}
|
||||||
|
if which&PERMITTED != 0 {
|
||||||
|
c.data[i].permitted &= ^(1 << uint(what))
|
||||||
|
}
|
||||||
|
if which&INHERITABLE != 0 {
|
||||||
|
c.data[i].inheritable &= ^(1 << uint(what))
|
||||||
|
}
|
||||||
|
if which&BOUNDING != 0 {
|
||||||
|
c.bounds[i] &= ^(1 << uint(what))
|
||||||
|
}
|
||||||
|
if which&AMBIENT != 0 {
|
||||||
|
c.ambient[i] &= ^(1 << uint(what))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *capsV3) Fill(kind CapType) {
|
||||||
|
if kind&CAPS == CAPS {
|
||||||
|
c.data[0].effective = 0xffffffff
|
||||||
|
c.data[0].permitted = 0xffffffff
|
||||||
|
c.data[0].inheritable = 0
|
||||||
|
c.data[1].effective = 0xffffffff
|
||||||
|
c.data[1].permitted = 0xffffffff
|
||||||
|
c.data[1].inheritable = 0
|
||||||
|
}
|
||||||
|
|
||||||
|
if kind&BOUNDS == BOUNDS {
|
||||||
|
c.bounds[0] = 0xffffffff
|
||||||
|
c.bounds[1] = 0xffffffff
|
||||||
|
}
|
||||||
|
if kind&AMBS == AMBS {
|
||||||
|
c.ambient[0] = 0xffffffff
|
||||||
|
c.ambient[1] = 0xffffffff
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *capsV3) Clear(kind CapType) {
|
||||||
|
if kind&CAPS == CAPS {
|
||||||
|
c.data[0].effective = 0
|
||||||
|
c.data[0].permitted = 0
|
||||||
|
c.data[0].inheritable = 0
|
||||||
|
c.data[1].effective = 0
|
||||||
|
c.data[1].permitted = 0
|
||||||
|
c.data[1].inheritable = 0
|
||||||
|
}
|
||||||
|
|
||||||
|
if kind&BOUNDS == BOUNDS {
|
||||||
|
c.bounds[0] = 0
|
||||||
|
c.bounds[1] = 0
|
||||||
|
}
|
||||||
|
if kind&AMBS == AMBS {
|
||||||
|
c.ambient[0] = 0
|
||||||
|
c.ambient[1] = 0
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *capsV3) StringCap(which CapType) (ret string) {
|
||||||
|
return mkStringCap(c, which)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *capsV3) String() (ret string) {
|
||||||
|
return mkString(c, BOUNDING)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *capsV3) Load() (err error) {
|
||||||
|
err = capget(&c.hdr, &c.data[0])
|
||||||
|
if err != nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
var status_path string
|
||||||
|
|
||||||
|
if c.hdr.pid == 0 {
|
||||||
|
status_path = fmt.Sprintf("/proc/self/status")
|
||||||
|
} else {
|
||||||
|
status_path = fmt.Sprintf("/proc/%d/status", c.hdr.pid)
|
||||||
|
}
|
||||||
|
|
||||||
|
f, err := os.Open(status_path)
|
||||||
|
if err != nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
b := bufio.NewReader(f)
|
||||||
|
for {
|
||||||
|
line, e := b.ReadString('\n')
|
||||||
|
if e != nil {
|
||||||
|
if e != io.EOF {
|
||||||
|
err = e
|
||||||
|
}
|
||||||
|
break
|
||||||
|
}
|
||||||
|
if strings.HasPrefix(line, "CapB") {
|
||||||
|
fmt.Sscanf(line[4:], "nd: %08x%08x", &c.bounds[1], &c.bounds[0])
|
||||||
|
break
|
||||||
|
}
|
||||||
|
if strings.HasPrefix(line, "CapA") {
|
||||||
|
fmt.Sscanf(line[4:], "mb: %08x%08x", &c.ambient[1], &c.ambient[0])
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
f.Close()
|
||||||
|
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *capsV3) Apply(kind CapType) (err error) {
|
||||||
|
if kind&BOUNDS == BOUNDS {
|
||||||
|
var data [2]capData
|
||||||
|
err = capget(&c.hdr, &data[0])
|
||||||
|
if err != nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
if (1<<uint(CAP_SETPCAP))&data[0].effective != 0 {
|
||||||
|
for i := Cap(0); i <= CAP_LAST_CAP; i++ {
|
||||||
|
if c.Get(BOUNDING, i) {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
err = prctl(syscall.PR_CAPBSET_DROP, uintptr(i), 0, 0, 0)
|
||||||
|
if err != nil {
|
||||||
|
// Ignore EINVAL since the capability may not be supported in this system.
|
||||||
|
if errno, ok := err.(syscall.Errno); ok && errno == syscall.EINVAL {
|
||||||
|
err = nil
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
return
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if kind&CAPS == CAPS {
|
||||||
|
err = capset(&c.hdr, &c.data[0])
|
||||||
|
if err != nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if kind&AMBS == AMBS {
|
||||||
|
for i := Cap(0); i <= CAP_LAST_CAP; i++ {
|
||||||
|
action := pr_CAP_AMBIENT_LOWER
|
||||||
|
if c.Get(AMBIENT, i) {
|
||||||
|
action = pr_CAP_AMBIENT_RAISE
|
||||||
|
}
|
||||||
|
err := prctl(pr_CAP_AMBIENT, action, uintptr(i), 0, 0)
|
||||||
|
// Ignore EINVAL as not supported on kernels before 4.3
|
||||||
|
if errno, ok := err.(syscall.Errno); ok && errno == syscall.EINVAL {
|
||||||
|
err = nil
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
func newFile(path string) (c Capabilities, err error) {
|
||||||
|
c = &capsFile{path: path}
|
||||||
|
err = c.Load()
|
||||||
|
if err != nil {
|
||||||
|
c = nil
|
||||||
|
}
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
type capsFile struct {
|
||||||
|
path string
|
||||||
|
data vfscapData
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *capsFile) Get(which CapType, what Cap) bool {
|
||||||
|
var i uint
|
||||||
|
if what > 31 {
|
||||||
|
if c.data.version == 1 {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
i = uint(what) >> 5
|
||||||
|
what %= 32
|
||||||
|
}
|
||||||
|
|
||||||
|
switch which {
|
||||||
|
case EFFECTIVE:
|
||||||
|
return (1<<uint(what))&c.data.effective[i] != 0
|
||||||
|
case PERMITTED:
|
||||||
|
return (1<<uint(what))&c.data.data[i].permitted != 0
|
||||||
|
case INHERITABLE:
|
||||||
|
return (1<<uint(what))&c.data.data[i].inheritable != 0
|
||||||
|
}
|
||||||
|
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *capsFile) getData(which CapType, dest []uint32) {
|
||||||
|
switch which {
|
||||||
|
case EFFECTIVE:
|
||||||
|
dest[0] = c.data.effective[0]
|
||||||
|
dest[1] = c.data.effective[1]
|
||||||
|
case PERMITTED:
|
||||||
|
dest[0] = c.data.data[0].permitted
|
||||||
|
dest[1] = c.data.data[1].permitted
|
||||||
|
case INHERITABLE:
|
||||||
|
dest[0] = c.data.data[0].inheritable
|
||||||
|
dest[1] = c.data.data[1].inheritable
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *capsFile) Empty(which CapType) bool {
|
||||||
|
var data [2]uint32
|
||||||
|
c.getData(which, data[:])
|
||||||
|
return data[0] == 0 && data[1] == 0
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *capsFile) Full(which CapType) bool {
|
||||||
|
var data [2]uint32
|
||||||
|
c.getData(which, data[:])
|
||||||
|
if c.data.version == 0 {
|
||||||
|
return (data[0] & 0x7fffffff) == 0x7fffffff
|
||||||
|
}
|
||||||
|
if (data[0] & 0xffffffff) != 0xffffffff {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
return (data[1] & capUpperMask) == capUpperMask
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *capsFile) Set(which CapType, caps ...Cap) {
|
||||||
|
for _, what := range caps {
|
||||||
|
var i uint
|
||||||
|
if what > 31 {
|
||||||
|
if c.data.version == 1 {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
i = uint(what) >> 5
|
||||||
|
what %= 32
|
||||||
|
}
|
||||||
|
|
||||||
|
if which&EFFECTIVE != 0 {
|
||||||
|
c.data.effective[i] |= 1 << uint(what)
|
||||||
|
}
|
||||||
|
if which&PERMITTED != 0 {
|
||||||
|
c.data.data[i].permitted |= 1 << uint(what)
|
||||||
|
}
|
||||||
|
if which&INHERITABLE != 0 {
|
||||||
|
c.data.data[i].inheritable |= 1 << uint(what)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *capsFile) Unset(which CapType, caps ...Cap) {
|
||||||
|
for _, what := range caps {
|
||||||
|
var i uint
|
||||||
|
if what > 31 {
|
||||||
|
if c.data.version == 1 {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
i = uint(what) >> 5
|
||||||
|
what %= 32
|
||||||
|
}
|
||||||
|
|
||||||
|
if which&EFFECTIVE != 0 {
|
||||||
|
c.data.effective[i] &= ^(1 << uint(what))
|
||||||
|
}
|
||||||
|
if which&PERMITTED != 0 {
|
||||||
|
c.data.data[i].permitted &= ^(1 << uint(what))
|
||||||
|
}
|
||||||
|
if which&INHERITABLE != 0 {
|
||||||
|
c.data.data[i].inheritable &= ^(1 << uint(what))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *capsFile) Fill(kind CapType) {
|
||||||
|
if kind&CAPS == CAPS {
|
||||||
|
c.data.effective[0] = 0xffffffff
|
||||||
|
c.data.data[0].permitted = 0xffffffff
|
||||||
|
c.data.data[0].inheritable = 0
|
||||||
|
if c.data.version == 2 {
|
||||||
|
c.data.effective[1] = 0xffffffff
|
||||||
|
c.data.data[1].permitted = 0xffffffff
|
||||||
|
c.data.data[1].inheritable = 0
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *capsFile) Clear(kind CapType) {
|
||||||
|
if kind&CAPS == CAPS {
|
||||||
|
c.data.effective[0] = 0
|
||||||
|
c.data.data[0].permitted = 0
|
||||||
|
c.data.data[0].inheritable = 0
|
||||||
|
if c.data.version == 2 {
|
||||||
|
c.data.effective[1] = 0
|
||||||
|
c.data.data[1].permitted = 0
|
||||||
|
c.data.data[1].inheritable = 0
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *capsFile) StringCap(which CapType) (ret string) {
|
||||||
|
return mkStringCap(c, which)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *capsFile) String() (ret string) {
|
||||||
|
return mkString(c, INHERITABLE)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *capsFile) Load() (err error) {
|
||||||
|
return getVfsCap(c.path, &c.data)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *capsFile) Apply(kind CapType) (err error) {
|
||||||
|
if kind&CAPS == CAPS {
|
||||||
|
return setVfsCap(c.path, &c.data)
|
||||||
|
}
|
||||||
|
return
|
||||||
|
}
|
19  vendor/github.com/syndtr/gocapability/capability/capability_noop.go  (generated, vendored, Normal file)
@@ -0,0 +1,19 @@
// Copyright (c) 2013, Suryandaru Triandana <syndtr@gmail.com>
// All rights reserved.
//
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

// +build !linux

package capability

import "errors"

func newPid(pid int) (Capabilities, error) {
	return nil, errors.New("not supported")
}

func newFile(path string) (Capabilities, error) {
	return nil, errors.New("not supported")
}
268  vendor/github.com/syndtr/gocapability/capability/enum.go  (generated, vendored, Normal file)
@@ -0,0 +1,268 @@
|
|||||||
|
// Copyright (c) 2013, Suryandaru Triandana <syndtr@gmail.com>
|
||||||
|
// All rights reserved.
|
||||||
|
//
|
||||||
|
// Use of this source code is governed by a BSD-style license that can be
|
||||||
|
// found in the LICENSE file.
|
||||||
|
|
||||||
|
package capability
|
||||||
|
|
||||||
|
type CapType uint
|
||||||
|
|
||||||
|
func (c CapType) String() string {
|
||||||
|
switch c {
|
||||||
|
case EFFECTIVE:
|
||||||
|
return "effective"
|
||||||
|
case PERMITTED:
|
||||||
|
return "permitted"
|
||||||
|
case INHERITABLE:
|
||||||
|
return "inheritable"
|
||||||
|
case BOUNDING:
|
||||||
|
return "bounding"
|
||||||
|
case CAPS:
|
||||||
|
return "caps"
|
||||||
|
case AMBIENT:
|
||||||
|
return "ambient"
|
||||||
|
}
|
||||||
|
return "unknown"
|
||||||
|
}
|
||||||
|
|
||||||
|
const (
|
||||||
|
EFFECTIVE CapType = 1 << iota
|
||||||
|
PERMITTED
|
||||||
|
INHERITABLE
|
||||||
|
BOUNDING
|
||||||
|
AMBIENT
|
||||||
|
|
||||||
|
CAPS = EFFECTIVE | PERMITTED | INHERITABLE
|
||||||
|
BOUNDS = BOUNDING
|
||||||
|
AMBS = AMBIENT
|
||||||
|
)
|
||||||
|
|
||||||
|
//go:generate go run enumgen/gen.go
|
||||||
|
type Cap int
|
||||||
|
|
||||||
|
// POSIX-draft defined capabilities.
|
||||||
|
const (
|
||||||
|
// In a system with the [_POSIX_CHOWN_RESTRICTED] option defined, this
|
||||||
|
// overrides the restriction of changing file ownership and group
|
||||||
|
// ownership.
|
||||||
|
CAP_CHOWN = Cap(0)
|
||||||
|
|
||||||
|
// Override all DAC access, including ACL execute access if
|
||||||
|
// [_POSIX_ACL] is defined. Excluding DAC access covered by
|
||||||
|
// CAP_LINUX_IMMUTABLE.
|
||||||
|
CAP_DAC_OVERRIDE = Cap(1)
|
||||||
|
|
||||||
|
// Overrides all DAC restrictions regarding read and search on files
|
||||||
|
// and directories, including ACL restrictions if [_POSIX_ACL] is
|
||||||
|
// defined. Excluding DAC access covered by CAP_LINUX_IMMUTABLE.
|
||||||
|
CAP_DAC_READ_SEARCH = Cap(2)
|
||||||
|
|
||||||
|
// Overrides all restrictions about allowed operations on files, where
|
||||||
|
// file owner ID must be equal to the user ID, except where CAP_FSETID
|
||||||
|
// is applicable. It doesn't override MAC and DAC restrictions.
|
||||||
|
CAP_FOWNER = Cap(3)
|
||||||
|
|
||||||
|
// Overrides the following restrictions that the effective user ID
|
||||||
|
// shall match the file owner ID when setting the S_ISUID and S_ISGID
|
||||||
|
// bits on that file; that the effective group ID (or one of the
|
||||||
|
// supplementary group IDs) shall match the file owner ID when setting
|
||||||
|
// the S_ISGID bit on that file; that the S_ISUID and S_ISGID bits are
|
||||||
|
// cleared on successful return from chown(2) (not implemented).
|
||||||
|
CAP_FSETID = Cap(4)
|
||||||
|
|
||||||
|
// Overrides the restriction that the real or effective user ID of a
|
||||||
|
// process sending a signal must match the real or effective user ID
|
||||||
|
// of the process receiving the signal.
|
||||||
|
CAP_KILL = Cap(5)
|
||||||
|
|
||||||
|
// Allows setgid(2) manipulation
|
||||||
|
// Allows setgroups(2)
|
||||||
|
// Allows forged gids on socket credentials passing.
|
||||||
|
CAP_SETGID = Cap(6)
|
||||||
|
|
||||||
|
// Allows set*uid(2) manipulation (including fsuid).
|
||||||
|
// Allows forged pids on socket credentials passing.
|
||||||
|
CAP_SETUID = Cap(7)
|
||||||
|
|
||||||
|
// Linux-specific capabilities
|
||||||
|
|
||||||
|
// Without VFS support for capabilities:
|
||||||
|
// Transfer any capability in your permitted set to any pid,
|
||||||
|
// remove any capability in your permitted set from any pid
|
||||||
|
// With VFS support for capabilities (neither of above, but)
|
||||||
|
// Add any capability from current's capability bounding set
|
||||||
|
// to the current process' inheritable set
|
||||||
|
// Allow taking bits out of capability bounding set
|
||||||
|
// Allow modification of the securebits for a process
|
||||||
|
CAP_SETPCAP = Cap(8)
|
||||||
|
|
||||||
|
// Allow modification of S_IMMUTABLE and S_APPEND file attributes
|
||||||
|
CAP_LINUX_IMMUTABLE = Cap(9)
|
||||||
|
|
||||||
|
// Allows binding to TCP/UDP sockets below 1024
|
||||||
|
// Allows binding to ATM VCIs below 32
|
||||||
|
CAP_NET_BIND_SERVICE = Cap(10)
|
||||||
|
|
||||||
|
// Allow broadcasting, listen to multicast
|
||||||
|
CAP_NET_BROADCAST = Cap(11)
|
||||||
|
|
||||||
|
// Allow interface configuration
|
||||||
|
// Allow administration of IP firewall, masquerading and accounting
|
||||||
|
// Allow setting debug option on sockets
|
||||||
|
// Allow modification of routing tables
|
||||||
|
// Allow setting arbitrary process / process group ownership on
|
||||||
|
// sockets
|
||||||
|
// Allow binding to any address for transparent proxying (also via NET_RAW)
|
||||||
|
// Allow setting TOS (type of service)
|
||||||
|
// Allow setting promiscuous mode
|
||||||
|
// Allow clearing driver statistics
|
||||||
|
// Allow multicasting
|
||||||
|
// Allow read/write of device-specific registers
|
||||||
|
// Allow activation of ATM control sockets
|
||||||
|
CAP_NET_ADMIN = Cap(12)
|
||||||
|
|
||||||
|
// Allow use of RAW sockets
|
||||||
|
// Allow use of PACKET sockets
|
||||||
|
// Allow binding to any address for transparent proxying (also via NET_ADMIN)
|
||||||
|
CAP_NET_RAW = Cap(13)
|
||||||
|
|
||||||
|
// Allow locking of shared memory segments
|
||||||
|
// Allow mlock and mlockall (which doesn't really have anything to do
|
||||||
|
// with IPC)
|
||||||
|
CAP_IPC_LOCK = Cap(14)
|
||||||
|
|
||||||
|
// Override IPC ownership checks
|
||||||
|
CAP_IPC_OWNER = Cap(15)
|
||||||
|
|
||||||
|
// Insert and remove kernel modules - modify kernel without limit
|
||||||
|
CAP_SYS_MODULE = Cap(16)
|
||||||
|
|
||||||
|
// Allow ioperm/iopl access
|
||||||
|
// Allow sending USB messages to any device via /proc/bus/usb
|
||||||
|
CAP_SYS_RAWIO = Cap(17)
|
||||||
|
|
||||||
|
// Allow use of chroot()
|
||||||
|
CAP_SYS_CHROOT = Cap(18)
|
||||||
|
|
||||||
|
// Allow ptrace() of any process
|
||||||
|
CAP_SYS_PTRACE = Cap(19)
|
||||||
|
|
||||||
|
// Allow configuration of process accounting
|
||||||
|
CAP_SYS_PACCT = Cap(20)
|
||||||
|
|
||||||
|
// Allow configuration of the secure attention key
|
||||||
|
// Allow administration of the random device
|
||||||
|
// Allow examination and configuration of disk quotas
|
||||||
|
// Allow setting the domainname
|
||||||
|
// Allow setting the hostname
|
||||||
|
// Allow calling bdflush()
|
||||||
|
// Allow mount() and umount(), setting up new smb connection
|
||||||
|
// Allow some autofs root ioctls
|
||||||
|
// Allow nfsservctl
|
||||||
|
// Allow VM86_REQUEST_IRQ
|
||||||
|
// Allow to read/write pci config on alpha
|
||||||
|
// Allow irix_prctl on mips (setstacksize)
|
||||||
|
// Allow flushing all cache on m68k (sys_cacheflush)
|
||||||
|
// Allow removing semaphores
|
||||||
|
// Used instead of CAP_CHOWN to "chown" IPC message queues, semaphores
|
||||||
|
// and shared memory
|
||||||
|
// Allow locking/unlocking of shared memory segment
|
||||||
|
// Allow turning swap on/off
|
||||||
|
// Allow forged pids on socket credentials passing
|
||||||
|
// Allow setting readahead and flushing buffers on block devices
|
||||||
|
// Allow setting geometry in floppy driver
|
||||||
|
// Allow turning DMA on/off in xd driver
|
||||||
|
// Allow administration of md devices (mostly the above, but some
|
||||||
|
// extra ioctls)
|
||||||
|
// Allow tuning the ide driver
|
||||||
|
// Allow access to the nvram device
|
||||||
|
// Allow administration of apm_bios, serial and bttv (TV) device
|
||||||
|
// Allow manufacturer commands in isdn CAPI support driver
|
||||||
|
// Allow reading non-standardized portions of pci configuration space
|
||||||
|
// Allow DDI debug ioctl on sbpcd driver
|
||||||
|
// Allow setting up serial ports
|
||||||
|
// Allow sending raw qic-117 commands
|
||||||
|
// Allow enabling/disabling tagged queuing on SCSI controllers and sending
|
||||||
|
// arbitrary SCSI commands
|
||||||
|
// Allow setting encryption key on loopback filesystem
|
||||||
|
// Allow setting zone reclaim policy
|
||||||
|
CAP_SYS_ADMIN = Cap(21)
|
||||||
|
|
||||||
|
// Allow use of reboot()
|
||||||
|
CAP_SYS_BOOT = Cap(22)
|
||||||
|
|
||||||
|
// Allow raising priority and setting priority on other (different
|
||||||
|
// UID) processes
|
||||||
|
// Allow use of FIFO and round-robin (realtime) scheduling on own
|
||||||
|
// processes and setting the scheduling algorithm used by another
|
||||||
|
// process.
|
||||||
|
// Allow setting cpu affinity on other processes
|
||||||
|
CAP_SYS_NICE = Cap(23)
|
||||||
|
|
||||||
|
// Override resource limits. Set resource limits.
|
||||||
|
// Override quota limits.
|
||||||
|
// Override reserved space on ext2 filesystem
|
||||||
|
// Modify data journaling mode on ext3 filesystem (uses journaling
|
||||||
|
// resources)
|
||||||
|
// NOTE: ext2 honors fsuid when checking for resource overrides, so
|
||||||
|
// you can override using fsuid too
|
||||||
|
// Override size restrictions on IPC message queues
|
||||||
|
// Allow more than 64hz interrupts from the real-time clock
|
||||||
|
// Override max number of consoles on console allocation
|
||||||
|
// Override max number of keymaps
|
||||||
|
CAP_SYS_RESOURCE = Cap(24)
|
||||||
|
|
||||||
|
// Allow manipulation of system clock
|
||||||
|
// Allow irix_stime on mips
|
||||||
|
// Allow setting the real-time clock
|
||||||
|
CAP_SYS_TIME = Cap(25)
|
||||||
|
|
||||||
|
// Allow configuration of tty devices
|
||||||
|
// Allow vhangup() of tty
|
||||||
|
CAP_SYS_TTY_CONFIG = Cap(26)
|
||||||
|
|
||||||
|
// Allow the privileged aspects of mknod()
|
||||||
|
CAP_MKNOD = Cap(27)
|
||||||
|
|
||||||
|
// Allow taking of leases on files
|
||||||
|
CAP_LEASE = Cap(28)
|
||||||
|
|
||||||
|
CAP_AUDIT_WRITE = Cap(29)
|
||||||
|
CAP_AUDIT_CONTROL = Cap(30)
|
||||||
|
CAP_SETFCAP = Cap(31)
|
||||||
|
|
||||||
|
// Override MAC access.
|
||||||
|
// The base kernel enforces no MAC policy.
|
||||||
|
// An LSM may enforce a MAC policy, and if it does and it chooses
|
||||||
|
// to implement capability based overrides of that policy, this is
|
||||||
|
// the capability it should use to do so.
|
||||||
|
CAP_MAC_OVERRIDE = Cap(32)
|
||||||
|
|
||||||
|
// Allow MAC configuration or state changes.
|
||||||
|
// The base kernel requires no MAC configuration.
|
||||||
|
// An LSM may enforce a MAC policy, and if it does and it chooses
|
||||||
|
// to implement capability based checks on modifications to that
|
||||||
|
// policy or the data required to maintain it, this is the
|
||||||
|
// capability it should use to do so.
|
||||||
|
CAP_MAC_ADMIN = Cap(33)
|
||||||
|
|
||||||
|
// Allow configuring the kernel's syslog (printk behaviour)
|
||||||
|
CAP_SYSLOG = Cap(34)
|
||||||
|
|
||||||
|
// Allow triggering something that will wake the system
|
||||||
|
CAP_WAKE_ALARM = Cap(35)
|
||||||
|
|
||||||
|
// Allow preventing system suspends
|
||||||
|
CAP_BLOCK_SUSPEND = Cap(36)
|
||||||
|
|
||||||
|
// Allow reading audit messages from the kernel
|
||||||
|
CAP_AUDIT_READ = Cap(37)
|
||||||
|
)
|
||||||
|
|
||||||
|
var (
|
||||||
|
// Highest valid capability of the running kernel.
|
||||||
|
CAP_LAST_CAP = Cap(63)
|
||||||
|
|
||||||
|
capUpperMask = ^uint32(0)
|
||||||
|
)
|
129  vendor/github.com/syndtr/gocapability/capability/enum_gen.go  (generated, vendored, Normal file)
@@ -0,0 +1,129 @@
|
|||||||
|
// generated file; DO NOT EDIT - use go generate in directory with source
|
||||||
|
|
||||||
|
package capability
|
||||||
|
|
||||||
|
func (c Cap) String() string {
|
||||||
|
switch c {
|
||||||
|
case CAP_CHOWN:
|
||||||
|
return "chown"
|
||||||
|
case CAP_DAC_OVERRIDE:
|
||||||
|
return "dac_override"
|
||||||
|
case CAP_DAC_READ_SEARCH:
|
||||||
|
return "dac_read_search"
|
||||||
|
case CAP_FOWNER:
|
||||||
|
return "fowner"
|
||||||
|
case CAP_FSETID:
|
||||||
|
return "fsetid"
|
||||||
|
case CAP_KILL:
|
||||||
|
return "kill"
|
||||||
|
case CAP_SETGID:
|
||||||
|
return "setgid"
|
||||||
|
case CAP_SETUID:
|
||||||
|
return "setuid"
|
||||||
|
case CAP_SETPCAP:
|
||||||
|
return "setpcap"
|
||||||
|
case CAP_LINUX_IMMUTABLE:
|
||||||
|
return "linux_immutable"
|
||||||
|
case CAP_NET_BIND_SERVICE:
|
||||||
|
return "net_bind_service"
|
||||||
|
case CAP_NET_BROADCAST:
|
||||||
|
return "net_broadcast"
|
||||||
|
case CAP_NET_ADMIN:
|
||||||
|
return "net_admin"
|
||||||
|
case CAP_NET_RAW:
|
||||||
|
return "net_raw"
|
||||||
|
case CAP_IPC_LOCK:
|
||||||
|
return "ipc_lock"
|
||||||
|
case CAP_IPC_OWNER:
|
||||||
|
return "ipc_owner"
|
||||||
|
case CAP_SYS_MODULE:
|
||||||
|
return "sys_module"
|
||||||
|
case CAP_SYS_RAWIO:
|
||||||
|
return "sys_rawio"
|
||||||
|
case CAP_SYS_CHROOT:
|
||||||
|
return "sys_chroot"
|
||||||
|
case CAP_SYS_PTRACE:
|
||||||
|
return "sys_ptrace"
|
||||||
|
case CAP_SYS_PACCT:
|
||||||
|
return "sys_pacct"
|
||||||
|
case CAP_SYS_ADMIN:
|
||||||
|
return "sys_admin"
|
||||||
|
case CAP_SYS_BOOT:
|
||||||
|
return "sys_boot"
|
||||||
|
case CAP_SYS_NICE:
|
||||||
|
return "sys_nice"
|
||||||
|
case CAP_SYS_RESOURCE:
|
||||||
|
return "sys_resource"
|
||||||
|
case CAP_SYS_TIME:
|
||||||
|
return "sys_time"
|
||||||
|
case CAP_SYS_TTY_CONFIG:
|
||||||
|
return "sys_tty_config"
|
||||||
|
case CAP_MKNOD:
|
||||||
|
return "mknod"
|
||||||
|
case CAP_LEASE:
|
||||||
|
return "lease"
|
||||||
|
case CAP_AUDIT_WRITE:
|
||||||
|
return "audit_write"
|
||||||
|
case CAP_AUDIT_CONTROL:
|
||||||
|
return "audit_control"
|
||||||
|
case CAP_SETFCAP:
|
||||||
|
return "setfcap"
|
||||||
|
case CAP_MAC_OVERRIDE:
|
||||||
|
return "mac_override"
|
||||||
|
case CAP_MAC_ADMIN:
|
||||||
|
return "mac_admin"
|
||||||
|
case CAP_SYSLOG:
|
||||||
|
return "syslog"
|
||||||
|
case CAP_WAKE_ALARM:
|
||||||
|
return "wake_alarm"
|
||||||
|
case CAP_BLOCK_SUSPEND:
|
||||||
|
return "block_suspend"
|
||||||
|
case CAP_AUDIT_READ:
|
||||||
|
return "audit_read"
|
||||||
|
}
|
||||||
|
return "unknown"
|
||||||
|
}
|
||||||
|
|
||||||
|
// List returns list of all supported capabilities
|
||||||
|
func List() []Cap {
|
||||||
|
return []Cap{
|
||||||
|
CAP_CHOWN,
|
||||||
|
CAP_DAC_OVERRIDE,
|
||||||
|
CAP_DAC_READ_SEARCH,
|
||||||
|
CAP_FOWNER,
|
||||||
|
CAP_FSETID,
|
||||||
|
CAP_KILL,
|
||||||
|
CAP_SETGID,
|
||||||
|
CAP_SETUID,
|
||||||
|
CAP_SETPCAP,
|
||||||
|
CAP_LINUX_IMMUTABLE,
|
||||||
|
CAP_NET_BIND_SERVICE,
|
||||||
|
CAP_NET_BROADCAST,
|
||||||
|
CAP_NET_ADMIN,
|
||||||
|
CAP_NET_RAW,
|
||||||
|
CAP_IPC_LOCK,
|
||||||
|
CAP_IPC_OWNER,
|
||||||
|
CAP_SYS_MODULE,
|
||||||
|
CAP_SYS_RAWIO,
|
||||||
|
CAP_SYS_CHROOT,
|
||||||
|
CAP_SYS_PTRACE,
|
||||||
|
CAP_SYS_PACCT,
|
||||||
|
CAP_SYS_ADMIN,
|
||||||
|
CAP_SYS_BOOT,
|
||||||
|
CAP_SYS_NICE,
|
||||||
|
CAP_SYS_RESOURCE,
|
||||||
|
CAP_SYS_TIME,
|
||||||
|
CAP_SYS_TTY_CONFIG,
|
||||||
|
CAP_MKNOD,
|
||||||
|
CAP_LEASE,
|
||||||
|
CAP_AUDIT_WRITE,
|
||||||
|
CAP_AUDIT_CONTROL,
|
||||||
|
CAP_SETFCAP,
|
||||||
|
CAP_MAC_OVERRIDE,
|
||||||
|
CAP_MAC_ADMIN,
|
||||||
|
CAP_SYSLOG,
|
||||||
|
CAP_WAKE_ALARM,
|
||||||
|
CAP_BLOCK_SUSPEND,
|
||||||
|
CAP_AUDIT_READ,
|
||||||
|
}
|
||||||
|
}
|
154  vendor/github.com/syndtr/gocapability/capability/syscall_linux.go  (generated, vendored, Normal file)
@@ -0,0 +1,154 @@
// Copyright (c) 2013, Suryandaru Triandana <syndtr@gmail.com>
// All rights reserved.
//
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

package capability

import (
	"syscall"
	"unsafe"
)

type capHeader struct {
	version uint32
	pid     int
}

type capData struct {
	effective   uint32
	permitted   uint32
	inheritable uint32
}

func capget(hdr *capHeader, data *capData) (err error) {
	_, _, e1 := syscall.Syscall(syscall.SYS_CAPGET, uintptr(unsafe.Pointer(hdr)), uintptr(unsafe.Pointer(data)), 0)
	if e1 != 0 {
		err = e1
	}
	return
}

func capset(hdr *capHeader, data *capData) (err error) {
	_, _, e1 := syscall.Syscall(syscall.SYS_CAPSET, uintptr(unsafe.Pointer(hdr)), uintptr(unsafe.Pointer(data)), 0)
	if e1 != 0 {
		err = e1
	}
	return
}

// not yet in syscall
const (
	pr_CAP_AMBIENT           = 47
	pr_CAP_AMBIENT_IS_SET    = uintptr(1)
	pr_CAP_AMBIENT_RAISE     = uintptr(2)
	pr_CAP_AMBIENT_LOWER     = uintptr(3)
	pr_CAP_AMBIENT_CLEAR_ALL = uintptr(4)
)

func prctl(option int, arg2, arg3, arg4, arg5 uintptr) (err error) {
	_, _, e1 := syscall.Syscall6(syscall.SYS_PRCTL, uintptr(option), arg2, arg3, arg4, arg5, 0)
	if e1 != 0 {
		err = e1
	}
	return
}

const (
	vfsXattrName = "security.capability"

	vfsCapVerMask = 0xff000000
	vfsCapVer1    = 0x01000000
	vfsCapVer2    = 0x02000000

	vfsCapFlagMask      = ^vfsCapVerMask
	vfsCapFlageffective = 0x000001

	vfscapDataSizeV1 = 4 * (1 + 2*1)
	vfscapDataSizeV2 = 4 * (1 + 2*2)
)

type vfscapData struct {
	magic uint32
	data  [2]struct {
		permitted   uint32
		inheritable uint32
	}
	effective [2]uint32
	version   int8
}

var (
	_vfsXattrName *byte
)

func init() {
	_vfsXattrName, _ = syscall.BytePtrFromString(vfsXattrName)
}

func getVfsCap(path string, dest *vfscapData) (err error) {
	var _p0 *byte
	_p0, err = syscall.BytePtrFromString(path)
	if err != nil {
		return
	}
	r0, _, e1 := syscall.Syscall6(syscall.SYS_GETXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_vfsXattrName)), uintptr(unsafe.Pointer(dest)), vfscapDataSizeV2, 0, 0)
	if e1 != 0 {
		if e1 == syscall.ENODATA {
			dest.version = 2
			return
		}
		err = e1
	}
	switch dest.magic & vfsCapVerMask {
	case vfsCapVer1:
		dest.version = 1
		if r0 != vfscapDataSizeV1 {
			return syscall.EINVAL
		}
		dest.data[1].permitted = 0
		dest.data[1].inheritable = 0
	case vfsCapVer2:
		dest.version = 2
		if r0 != vfscapDataSizeV2 {
			return syscall.EINVAL
		}
	default:
		return syscall.EINVAL
	}
	if dest.magic&vfsCapFlageffective != 0 {
		dest.effective[0] = dest.data[0].permitted | dest.data[0].inheritable
		dest.effective[1] = dest.data[1].permitted | dest.data[1].inheritable
	} else {
		dest.effective[0] = 0
		dest.effective[1] = 0
	}
	return
}

func setVfsCap(path string, data *vfscapData) (err error) {
	var _p0 *byte
	_p0, err = syscall.BytePtrFromString(path)
	if err != nil {
		return
	}
	var size uintptr
	if data.version == 1 {
		data.magic = vfsCapVer1
		size = vfscapDataSizeV1
	} else if data.version == 2 {
		data.magic = vfsCapVer2
		if data.effective[0] != 0 || data.effective[1] != 0 {
			data.magic |= vfsCapFlageffective
		}
		size = vfscapDataSizeV2
	} else {
		return syscall.EINVAL
	}
	_, _, e1 := syscall.Syscall6(syscall.SYS_SETXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_vfsXattrName)), uintptr(unsafe.Pointer(data)), size, 0, 0)
	if e1 != 0 {
		err = e1
	}
	return
}
3 vendor/github.com/tchap/go-patricia/AUTHORS generated vendored Normal file
@ -0,0 +1,3 @@
This is the complete list of go-patricia copyright holders:

Ondřej Kupka <ondra.cap@gmail.com>
20 vendor/github.com/tchap/go-patricia/LICENSE generated vendored Normal file
@ -0,0 +1,20 @@
The MIT License (MIT)

Copyright (c) 2014 The AUTHORS

Permission is hereby granted, free of charge, to any person obtaining a copy of
this software and associated documentation files (the "Software"), to deal in
the Software without restriction, including without limitation the rights to
use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
the Software, and to permit persons to whom the Software is furnished to do so,
subject to the following conditions:

The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
325 vendor/github.com/tchap/go-patricia/patricia/children.go generated vendored Normal file
@ -0,0 +1,325 @@
// Copyright (c) 2014 The go-patricia AUTHORS
//
// Use of this source code is governed by The MIT License
// that can be found in the LICENSE file.

package patricia

import (
	"fmt"
	"io"
	"sort"
)

type childList interface {
	length() int
	head() *Trie
	add(child *Trie) childList
	remove(b byte)
	replace(b byte, child *Trie)
	next(b byte) *Trie
	walk(prefix *Prefix, visitor VisitorFunc) error
	print(w io.Writer, indent int)
	total() int
}

type tries []*Trie

func (t tries) Len() int {
	return len(t)
}

func (t tries) Less(i, j int) bool {
	strings := sort.StringSlice{string(t[i].prefix), string(t[j].prefix)}
	return strings.Less(0, 1)
}

func (t tries) Swap(i, j int) {
	t[i], t[j] = t[j], t[i]
}

type sparseChildList struct {
	children tries
}

func newSparseChildList(maxChildrenPerSparseNode int) childList {
	return &sparseChildList{
		children: make(tries, 0, maxChildrenPerSparseNode),
	}
}

func (list *sparseChildList) length() int {
	return len(list.children)
}

func (list *sparseChildList) head() *Trie {
	return list.children[0]
}

func (list *sparseChildList) add(child *Trie) childList {
	// Search for an empty spot and insert the child if possible.
	if len(list.children) != cap(list.children) {
		list.children = append(list.children, child)
		return list
	}

	// Otherwise we have to transform to the dense list type.
	return newDenseChildList(list, child)
}

func (list *sparseChildList) remove(b byte) {
	for i, node := range list.children {
		if node.prefix[0] == b {
			list.children[i] = list.children[len(list.children)-1]
			list.children[len(list.children)-1] = nil
			list.children = list.children[:len(list.children)-1]
			return
		}
	}

	// This is not supposed to be reached.
	panic("removing non-existent child")
}

func (list *sparseChildList) replace(b byte, child *Trie) {
	// Make a consistency check.
	if p0 := child.prefix[0]; p0 != b {
		panic(fmt.Errorf("child prefix mismatch: %v != %v", p0, b))
	}

	// Seek the child and replace it.
	for i, node := range list.children {
		if node.prefix[0] == b {
			list.children[i] = child
			return
		}
	}
}

func (list *sparseChildList) next(b byte) *Trie {
	for _, child := range list.children {
		if child.prefix[0] == b {
			return child
		}
	}
	return nil
}

func (list *sparseChildList) walk(prefix *Prefix, visitor VisitorFunc) error {

	sort.Sort(list.children)

	for _, child := range list.children {
		*prefix = append(*prefix, child.prefix...)
		if child.item != nil {
			err := visitor(*prefix, child.item)
			if err != nil {
				if err == SkipSubtree {
					*prefix = (*prefix)[:len(*prefix)-len(child.prefix)]
					continue
				}
				*prefix = (*prefix)[:len(*prefix)-len(child.prefix)]
				return err
			}
		}

		err := child.children.walk(prefix, visitor)
		*prefix = (*prefix)[:len(*prefix)-len(child.prefix)]
		if err != nil {
			return err
		}
	}

	return nil
}

func (list *sparseChildList) total() int {
	tot := 0
	for _, child := range list.children {
		if child != nil {
			tot = tot + child.total()
		}
	}
	return tot
}

func (list *sparseChildList) print(w io.Writer, indent int) {
	for _, child := range list.children {
		if child != nil {
			child.print(w, indent)
		}
	}
}

type denseChildList struct {
	min         int
	max         int
	numChildren int
	headIndex   int
	children    []*Trie
}

func newDenseChildList(list *sparseChildList, child *Trie) childList {
	var (
		min int = 255
		max int = 0
	)
	for _, child := range list.children {
		b := int(child.prefix[0])
		if b < min {
			min = b
		}
		if b > max {
			max = b
		}
	}

	b := int(child.prefix[0])
	if b < min {
		min = b
	}
	if b > max {
		max = b
	}

	children := make([]*Trie, max-min+1)
	for _, child := range list.children {
		children[int(child.prefix[0])-min] = child
	}
	children[int(child.prefix[0])-min] = child

	return &denseChildList{
		min:         min,
		max:         max,
		numChildren: list.length() + 1,
		headIndex:   0,
		children:    children,
	}
}

func (list *denseChildList) length() int {
	return list.numChildren
}

func (list *denseChildList) head() *Trie {
	return list.children[list.headIndex]
}

func (list *denseChildList) add(child *Trie) childList {
	b := int(child.prefix[0])
	var i int

	switch {
	case list.min <= b && b <= list.max:
		if list.children[b-list.min] != nil {
			panic("dense child list collision detected")
		}
		i = b - list.min
		list.children[i] = child

	case b < list.min:
		children := make([]*Trie, list.max-b+1)
		i = 0
		children[i] = child
		copy(children[list.min-b:], list.children)
		list.children = children
		list.min = b

	default: // b > list.max
		children := make([]*Trie, b-list.min+1)
		i = b - list.min
		children[i] = child
		copy(children, list.children)
		list.children = children
		list.max = b
	}

	list.numChildren++
	if i < list.headIndex {
		list.headIndex = i
	}
	return list
}

func (list *denseChildList) remove(b byte) {
	i := int(b) - list.min
	if list.children[i] == nil {
		// This is not supposed to be reached.
		panic("removing non-existent child")
	}
	list.numChildren--
	list.children[i] = nil

	// Update head index.
	if i == list.headIndex {
		for ; i < len(list.children); i++ {
			if list.children[i] != nil {
				list.headIndex = i
				return
			}
		}
	}
}

func (list *denseChildList) replace(b byte, child *Trie) {
	// Make a consistency check.
	if p0 := child.prefix[0]; p0 != b {
		panic(fmt.Errorf("child prefix mismatch: %v != %v", p0, b))
	}

	// Replace the child.
	list.children[int(b)-list.min] = child
}

func (list *denseChildList) next(b byte) *Trie {
	i := int(b)
	if i < list.min || list.max < i {
		return nil
	}
	return list.children[i-list.min]
}

func (list *denseChildList) walk(prefix *Prefix, visitor VisitorFunc) error {
	for _, child := range list.children {
		if child == nil {
			continue
		}
		*prefix = append(*prefix, child.prefix...)
		if child.item != nil {
			if err := visitor(*prefix, child.item); err != nil {
				if err == SkipSubtree {
					*prefix = (*prefix)[:len(*prefix)-len(child.prefix)]
					continue
				}
				*prefix = (*prefix)[:len(*prefix)-len(child.prefix)]
				return err
			}
		}

		err := child.children.walk(prefix, visitor)
		*prefix = (*prefix)[:len(*prefix)-len(child.prefix)]
		if err != nil {
			return err
		}
	}

	return nil
}

func (list *denseChildList) print(w io.Writer, indent int) {
	for _, child := range list.children {
		if child != nil {
			child.print(w, indent)
		}
	}
}

func (list *denseChildList) total() int {
	tot := 0
	for _, child := range list.children {
		if child != nil {
			tot = tot + child.total()
		}
	}
	return tot
}
594 vendor/github.com/tchap/go-patricia/patricia/patricia.go generated vendored Normal file
@ -0,0 +1,594 @@
// Copyright (c) 2014 The go-patricia AUTHORS
//
// Use of this source code is governed by The MIT License
// that can be found in the LICENSE file.

package patricia

import (
	"bytes"
	"errors"
	"fmt"
	"io"
	"strings"
)

//------------------------------------------------------------------------------
// Trie
//------------------------------------------------------------------------------

const (
	DefaultMaxPrefixPerNode         = 10
	DefaultMaxChildrenPerSparseNode = 8
)

type (
	Prefix      []byte
	Item        interface{}
	VisitorFunc func(prefix Prefix, item Item) error
)

// Trie is a generic patricia trie that allows fast retrieval of items by prefix.
// and other funky stuff.
//
// Trie is not thread-safe.
type Trie struct {
	prefix Prefix
	item   Item

	maxPrefixPerNode         int
	maxChildrenPerSparseNode int

	children childList
}

// Public API ------------------------------------------------------------------

type Option func(*Trie)

// Trie constructor.
func NewTrie(options ...Option) *Trie {
	trie := &Trie{}

	for _, opt := range options {
		opt(trie)
	}

	if trie.maxPrefixPerNode <= 0 {
		trie.maxPrefixPerNode = DefaultMaxPrefixPerNode
	}
	if trie.maxChildrenPerSparseNode <= 0 {
		trie.maxChildrenPerSparseNode = DefaultMaxChildrenPerSparseNode
	}

	trie.children = newSparseChildList(trie.maxChildrenPerSparseNode)
	return trie
}

func MaxPrefixPerNode(value int) Option {
	return func(trie *Trie) {
		trie.maxPrefixPerNode = value
	}
}

func MaxChildrenPerSparseNode(value int) Option {
	return func(trie *Trie) {
		trie.maxChildrenPerSparseNode = value
	}
}

// Item returns the item stored in the root of this trie.
func (trie *Trie) Item() Item {
	return trie.item
}

// Insert inserts a new item into the trie using the given prefix. Insert does
// not replace existing items. It returns false if an item was already in place.
func (trie *Trie) Insert(key Prefix, item Item) (inserted bool) {
	return trie.put(key, item, false)
}

// Set works much like Insert, but it always sets the item, possibly replacing
// the item previously inserted.
func (trie *Trie) Set(key Prefix, item Item) {
	trie.put(key, item, true)
}

// Get returns the item located at key.
//
// This method is a bit dangerous, because Get can as well end up in an internal
// node that is not really representing any user-defined value. So when nil is
// a valid value being used, it is not possible to tell if the value was inserted
// into the tree by the user or not. A possible workaround for this is not to use
// nil interface as a valid value, even using zero value of any type is enough
// to prevent this bad behaviour.
func (trie *Trie) Get(key Prefix) (item Item) {
	_, node, found, leftover := trie.findSubtree(key)
	if !found || len(leftover) != 0 {
		return nil
	}
	return node.item
}

// Match returns what Get(prefix) != nil would return. The same warning as for
// Get applies here as well.
func (trie *Trie) Match(prefix Prefix) (matchedExactly bool) {
	return trie.Get(prefix) != nil
}

// MatchSubtree returns true when there is a subtree representing extensions
// to key, that is if there are any keys in the tree which have key as prefix.
func (trie *Trie) MatchSubtree(key Prefix) (matched bool) {
	_, _, matched, _ = trie.findSubtree(key)
	return
}

// Visit calls visitor on every node containing a non-nil item
// in alphabetical order.
//
// If an error is returned from visitor, the function stops visiting the tree
// and returns that error, unless it is a special error - SkipSubtree. In that
// case Visit skips the subtree represented by the current node and continues
// elsewhere.
func (trie *Trie) Visit(visitor VisitorFunc) error {
	return trie.walk(nil, visitor)
}

func (trie *Trie) size() int {
	n := 0

	trie.walk(nil, func(prefix Prefix, item Item) error {
		n++
		return nil
	})

	return n
}

func (trie *Trie) total() int {
	return 1 + trie.children.total()
}

// VisitSubtree works much like Visit, but it only visits nodes matching prefix.
func (trie *Trie) VisitSubtree(prefix Prefix, visitor VisitorFunc) error {
	// Nil prefix not allowed.
	if prefix == nil {
		panic(ErrNilPrefix)
	}

	// Empty trie must be handled explicitly.
	if trie.prefix == nil {
		return nil
	}

	// Locate the relevant subtree.
	_, root, found, leftover := trie.findSubtree(prefix)
	if !found {
		return nil
	}
	prefix = append(prefix, leftover...)

	// Visit it.
	return root.walk(prefix, visitor)
}

// VisitPrefixes visits only nodes that represent prefixes of key.
// To say the obvious, returning SkipSubtree from visitor makes no sense here.
func (trie *Trie) VisitPrefixes(key Prefix, visitor VisitorFunc) error {
	// Nil key not allowed.
	if key == nil {
		panic(ErrNilPrefix)
	}

	// Empty trie must be handled explicitly.
	if trie.prefix == nil {
		return nil
	}

	// Walk the path matching key prefixes.
	node := trie
	prefix := key
	offset := 0
	for {
		// Compute what part of prefix matches.
		common := node.longestCommonPrefixLength(key)
		key = key[common:]
		offset += common

		// Partial match means that there is no subtree matching prefix.
		if common < len(node.prefix) {
			return nil
		}

		// Call the visitor.
		if item := node.item; item != nil {
			if err := visitor(prefix[:offset], item); err != nil {
				return err
			}
		}

		if len(key) == 0 {
			// This node represents key, we are finished.
			return nil
		}

		// There is some key suffix left, move to the children.
		child := node.children.next(key[0])
		if child == nil {
			// There is nowhere to continue, return.
			return nil
		}

		node = child
	}
}

// Delete deletes the item represented by the given prefix.
//
// True is returned if the matching node was found and deleted.
func (trie *Trie) Delete(key Prefix) (deleted bool) {
	// Nil prefix not allowed.
	if key == nil {
		panic(ErrNilPrefix)
	}

	// Empty trie must be handled explicitly.
	if trie.prefix == nil {
		return false
	}

	// Find the relevant node.
	path, found, _ := trie.findSubtreePath(key)
	if !found {
		return false
	}

	node := path[len(path)-1]
	var parent *Trie
	if len(path) != 1 {
		parent = path[len(path)-2]
	}

	// If the item is already set to nil, there is nothing to do.
	if node.item == nil {
		return false
	}

	// Delete the item.
	node.item = nil

	// Initialise i before goto.
	// Will be used later in a loop.
	i := len(path) - 1

	// In case there are some child nodes, we cannot drop the whole subtree.
	// We can try to compact nodes, though.
	if node.children.length() != 0 {
		goto Compact
	}

	// In case we are at the root, just reset it and we are done.
	if parent == nil {
		node.reset()
		return true
	}

	// We can drop a subtree.
	// Find the first ancestor that has its value set or it has 2 or more child nodes.
	// That will be the node where to drop the subtree at.
	for ; i >= 0; i-- {
		if current := path[i]; current.item != nil || current.children.length() >= 2 {
			break
		}
	}

	// Handle the case when there is no such node.
	// In other words, we can reset the whole tree.
	if i == -1 {
		path[0].reset()
		return true
	}

	// We can just remove the subtree here.
	node = path[i]
	if i == 0 {
		parent = nil
	} else {
		parent = path[i-1]
	}
	// i+1 is always a valid index since i is never pointing to the last node.
	// The loop above skips at least the last node since we are sure that the item
	// is set to nil and it has no children, othewise we would be compacting instead.
	node.children.remove(path[i+1].prefix[0])

Compact:
	// The node is set to the first non-empty ancestor,
	// so try to compact since that might be possible now.
	if compacted := node.compact(); compacted != node {
		if parent == nil {
			*node = *compacted
		} else {
			parent.children.replace(node.prefix[0], compacted)
			*parent = *parent.compact()
		}
	}

	return true
}

// DeleteSubtree finds the subtree exactly matching prefix and deletes it.
//
// True is returned if the subtree was found and deleted.
func (trie *Trie) DeleteSubtree(prefix Prefix) (deleted bool) {
	// Nil prefix not allowed.
	if prefix == nil {
		panic(ErrNilPrefix)
	}

	// Empty trie must be handled explicitly.
	if trie.prefix == nil {
		return false
	}

	// Locate the relevant subtree.
	parent, root, found, _ := trie.findSubtree(prefix)
	if !found {
		return false
	}

	// If we are in the root of the trie, reset the trie.
	if parent == nil {
		root.reset()
		return true
	}

	// Otherwise remove the root node from its parent.
	parent.children.remove(root.prefix[0])
	return true
}

// Internal helper methods -----------------------------------------------------

func (trie *Trie) empty() bool {
	return trie.item == nil && trie.children.length() == 0
}

func (trie *Trie) reset() {
	trie.prefix = nil
	trie.children = newSparseChildList(trie.maxPrefixPerNode)
}

func (trie *Trie) put(key Prefix, item Item, replace bool) (inserted bool) {
	// Nil prefix not allowed.
	if key == nil {
		panic(ErrNilPrefix)
	}

	var (
		common int
		node   *Trie = trie
		child  *Trie
	)

	if node.prefix == nil {
		if len(key) <= trie.maxPrefixPerNode {
			node.prefix = key
			goto InsertItem
		}
		node.prefix = key[:trie.maxPrefixPerNode]
		key = key[trie.maxPrefixPerNode:]
		goto AppendChild
	}

	for {
		// Compute the longest common prefix length.
		common = node.longestCommonPrefixLength(key)
		key = key[common:]

		// Only a part matches, split.
		if common < len(node.prefix) {
			goto SplitPrefix
		}

		// common == len(node.prefix) since never (common > len(node.prefix))
		// common == len(former key) <-> 0 == len(key)
		// -> former key == node.prefix
		if len(key) == 0 {
			goto InsertItem
		}

		// Check children for matching prefix.
		child = node.children.next(key[0])
		if child == nil {
			goto AppendChild
		}
		node = child
	}

SplitPrefix:
	// Split the prefix if necessary.
	child = new(Trie)
	*child = *node
	*node = *NewTrie()
	node.prefix = child.prefix[:common]
	child.prefix = child.prefix[common:]
	child = child.compact()
	node.children = node.children.add(child)

AppendChild:
	// Keep appending children until whole prefix is inserted.
	// This loop starts with empty node.prefix that needs to be filled.
	for len(key) != 0 {
		child := NewTrie()
		if len(key) <= trie.maxPrefixPerNode {
			child.prefix = key
			node.children = node.children.add(child)
			node = child
			goto InsertItem
		} else {
			child.prefix = key[:trie.maxPrefixPerNode]
			key = key[trie.maxPrefixPerNode:]
			node.children = node.children.add(child)
			node = child
		}
	}

InsertItem:
	// Try to insert the item if possible.
	if replace || node.item == nil {
		node.item = item
		return true
	}
	return false
}

func (trie *Trie) compact() *Trie {
	// Only a node with a single child can be compacted.
	if trie.children.length() != 1 {
		return trie
	}

	child := trie.children.head()

	// If any item is set, we cannot compact since we want to retain
	// the ability to do searching by key. This makes compaction less usable,
	// but that simply cannot be avoided.
	if trie.item != nil || child.item != nil {
		return trie
	}

	// Make sure the combined prefixes fit into a single node.
	if len(trie.prefix)+len(child.prefix) > trie.maxPrefixPerNode {
		return trie
	}

	// Concatenate the prefixes, move the items.
	child.prefix = append(trie.prefix, child.prefix...)
	if trie.item != nil {
		child.item = trie.item
	}

	return child
}

func (trie *Trie) findSubtree(prefix Prefix) (parent *Trie, root *Trie, found bool, leftover Prefix) {
	// Find the subtree matching prefix.
	root = trie
	for {
		// Compute what part of prefix matches.
		common := root.longestCommonPrefixLength(prefix)
		prefix = prefix[common:]

		// We used up the whole prefix, subtree found.
		if len(prefix) == 0 {
			found = true
			leftover = root.prefix[common:]
			return
		}

		// Partial match means that there is no subtree matching prefix.
		if common < len(root.prefix) {
			leftover = root.prefix[common:]
			return
		}

		// There is some prefix left, move to the children.
		child := root.children.next(prefix[0])
		if child == nil {
			// There is nowhere to continue, there is no subtree matching prefix.
			return
		}

		parent = root
		root = child
	}
}

func (trie *Trie) findSubtreePath(prefix Prefix) (path []*Trie, found bool, leftover Prefix) {
	// Find the subtree matching prefix.
	root := trie
	var subtreePath []*Trie
	for {
		// Append the current root to the path.
		subtreePath = append(subtreePath, root)

		// Compute what part of prefix matches.
		common := root.longestCommonPrefixLength(prefix)
		prefix = prefix[common:]

		// We used up the whole prefix, subtree found.
		if len(prefix) == 0 {
			path = subtreePath
			found = true
			leftover = root.prefix[common:]
			return
		}

		// Partial match means that there is no subtree matching prefix.
		if common < len(root.prefix) {
			leftover = root.prefix[common:]
			return
		}

		// There is some prefix left, move to the children.
		child := root.children.next(prefix[0])
		if child == nil {
			// There is nowhere to continue, there is no subtree matching prefix.
			return
		}

		root = child
	}
}

func (trie *Trie) walk(actualRootPrefix Prefix, visitor VisitorFunc) error {
	var prefix Prefix
	// Allocate a bit more space for prefix at the beginning.
	if actualRootPrefix == nil {
		prefix = make(Prefix, 32+len(trie.prefix))
		copy(prefix, trie.prefix)
		prefix = prefix[:len(trie.prefix)]
	} else {
		prefix = make(Prefix, 32+len(actualRootPrefix))
		copy(prefix, actualRootPrefix)
		prefix = prefix[:len(actualRootPrefix)]
	}

	// Visit the root first. Not that this works for empty trie as well since
	// in that case item == nil && len(children) == 0.
	if trie.item != nil {
		if err := visitor(prefix, trie.item); err != nil {
			if err == SkipSubtree {
				return nil
			}
			return err
		}
	}

	// Then continue to the children.
	return trie.children.walk(&prefix, visitor)
}

func (trie *Trie) longestCommonPrefixLength(prefix Prefix) (i int) {
	for ; i < len(prefix) && i < len(trie.prefix) && prefix[i] == trie.prefix[i]; i++ {
	}
	return
}

func (trie *Trie) dump() string {
	writer := &bytes.Buffer{}
	trie.print(writer, 0)
	return writer.String()
}

func (trie *Trie) print(writer io.Writer, indent int) {
	fmt.Fprintf(writer, "%s%s %v\n", strings.Repeat(" ", indent), string(trie.prefix), trie.item)
	trie.children.print(writer, indent+2)
}

// Errors ----------------------------------------------------------------------

var (
	SkipSubtree  = errors.New("Skip this subtree")
	ErrNilPrefix = errors.New("Nil prefix passed into a method call")
)
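Not part of the vendored diff itself — a minimal sketch, assuming the import path listed in Godeps.json and hypothetical sandbox-ID keys, of how the Trie API above (NewTrie, Insert, VisitSubtree) is typically used for prefix lookups:

```go
package main

import (
	"fmt"

	"github.com/tchap/go-patricia/patricia"
)

func main() {
	trie := patricia.NewTrie()

	// Keys are arbitrary byte prefixes; items are untyped.
	trie.Insert(patricia.Prefix("sandbox/abc123"), 1)
	trie.Insert(patricia.Prefix("sandbox/abd456"), 2)

	// Walk everything stored under a common prefix.
	trie.VisitSubtree(patricia.Prefix("sandbox/ab"), func(p patricia.Prefix, item patricia.Item) error {
		fmt.Printf("%s -> %v\n", p, item)
		return nil
	})
}
```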
21 vendor/github.com/tonistiigi/fifo/LICENSE generated vendored Normal file
@ -0,0 +1,21 @@
MIT

Copyright (C) 2016 Tõnis Tiigi <tonistiigi@gmail.com>

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
13 vendor/github.com/tonistiigi/fifo/Makefile generated vendored Normal file
@ -0,0 +1,13 @@
.PHONY: fmt vet test deps

test: deps
	go test -v ./...

deps:
	go get -d -t ./...

fmt:
	gofmt -s -l .

vet:
	go vet ./...
216 vendor/github.com/tonistiigi/fifo/fifo.go generated vendored Normal file
@ -0,0 +1,216 @@
package fifo

import (
	"io"
	"os"
	"runtime"
	"sync"
	"syscall"

	"github.com/pkg/errors"
	"golang.org/x/net/context"
)

type fifo struct {
	flag        int
	opened      chan struct{}
	closed      chan struct{}
	closing     chan struct{}
	err         error
	file        *os.File
	closingOnce sync.Once // close has been called
	closedOnce  sync.Once // fifo is closed
	handle      *handle
}

var leakCheckWg *sync.WaitGroup

// OpenFifo opens a fifo. Returns io.ReadWriteCloser.
// Context can be used to cancel this function until open(2) has not returned.
// Accepted flags:
// - syscall.O_CREAT - create new fifo if one doesn't exist
// - syscall.O_RDONLY - open fifo only from reader side
// - syscall.O_WRONLY - open fifo only from writer side
// - syscall.O_RDWR - open fifo from both sides, never block on syscall level
// - syscall.O_NONBLOCK - return io.ReadWriteCloser even if other side of the
//   fifo isn't open. read/write will be connected after the actual fifo is
//   open or after fifo is closed.
func OpenFifo(ctx context.Context, fn string, flag int, perm os.FileMode) (io.ReadWriteCloser, error) {
	if _, err := os.Stat(fn); err != nil {
		if os.IsNotExist(err) && flag&syscall.O_CREAT != 0 {
			if err := mkfifo(fn, uint32(perm&os.ModePerm)); err != nil && !os.IsExist(err) {
				return nil, errors.Wrapf(err, "error creating fifo %v", fn)
			}
		} else {
			return nil, err
		}
	}

	block := flag&syscall.O_NONBLOCK == 0 || flag&syscall.O_RDWR != 0

	flag &= ^syscall.O_CREAT
	flag &= ^syscall.O_NONBLOCK

	h, err := getHandle(fn)
	if err != nil {
		return nil, err
	}

	f := &fifo{
		handle:  h,
		flag:    flag,
		opened:  make(chan struct{}),
		closed:  make(chan struct{}),
		closing: make(chan struct{}),
	}

	wg := leakCheckWg
	if wg != nil {
		wg.Add(2)
	}

	go func() {
		if wg != nil {
			defer wg.Done()
		}
		select {
		case <-ctx.Done():
			f.Close()
		case <-f.opened:
		case <-f.closed:
		}
	}()
	go func() {
		if wg != nil {
			defer wg.Done()
		}
		var file *os.File
		fn, err := h.Path()
		if err == nil {
			file, err = os.OpenFile(fn, flag, 0)
		}
		select {
		case <-f.closing:
			if err == nil {
				select {
				case <-ctx.Done():
					err = ctx.Err()
				default:
					err = errors.Errorf("fifo %v was closed before opening", h.Name())
				}
				if file != nil {
					file.Close()
				}
			}
		default:
		}
		if err != nil {
			f.closedOnce.Do(func() {
				f.err = err
				close(f.closed)
			})
			return
		}
		f.file = file
		close(f.opened)
	}()
	if block {
		select {
		case <-f.opened:
		case <-f.closed:
			return nil, f.err
		}
	}
	return f, nil
}

// Read from a fifo to a byte array.
func (f *fifo) Read(b []byte) (int, error) {
	if f.flag&syscall.O_WRONLY > 0 {
		return 0, errors.New("reading from write-only fifo")
	}
	select {
	case <-f.opened:
		return f.file.Read(b)
	default:
	}
	select {
	case <-f.opened:
		return f.file.Read(b)
	case <-f.closed:
		return 0, errors.New("reading from a closed fifo")
	}
}

// Write from byte array to a fifo.
func (f *fifo) Write(b []byte) (int, error) {
	if f.flag&(syscall.O_WRONLY|syscall.O_RDWR) == 0 {
		return 0, errors.New("writing to read-only fifo")
	}
	select {
	case <-f.opened:
		return f.file.Write(b)
	default:
	}
	select {
	case <-f.opened:
		return f.file.Write(b)
	case <-f.closed:
		return 0, errors.New("writing to a closed fifo")
	}
}

// Close the fifo. Next reads/writes will error. This method can also be used
// before open(2) has returned and fifo was never opened.
func (f *fifo) Close() (retErr error) {
	for {
		select {
		case <-f.closed:
			f.handle.Close()
			return
		default:
			select {
			case <-f.opened:
				f.closedOnce.Do(func() {
					retErr = f.file.Close()
					f.err = retErr
					close(f.closed)
				})
			default:
				if f.flag&syscall.O_RDWR != 0 {
					runtime.Gosched()
					break
				}
				f.closingOnce.Do(func() {
					close(f.closing)
				})
				reverseMode := syscall.O_WRONLY
				if f.flag&syscall.O_WRONLY > 0 {
					reverseMode = syscall.O_RDONLY
				}
				fn, err := f.handle.Path()
				// if Close() is called concurrently(shouldn't) it may cause error
				// because handle is closed
				select {
				case <-f.closed:
				default:
					if err != nil {
						// Path has become invalid. We will leak a goroutine.
						// This case should not happen in linux.
						f.closedOnce.Do(func() {
							f.err = err
							close(f.closed)
						})
						<-f.closed
						break
					}
					f, err := os.OpenFile(fn, reverseMode|syscall.O_NONBLOCK, 0)
					if err == nil {
						f.Close()
					}
					runtime.Gosched()
				}
			}
		}
	}
}
76 vendor/github.com/tonistiigi/fifo/handle_linux.go generated vendored Normal file
@ -0,0 +1,76 @@
// +build linux

package fifo

import (
	"fmt"
	"os"
	"sync"
	"syscall"

	"github.com/pkg/errors"
)

const O_PATH = 010000000

type handle struct {
	f         *os.File
	dev       uint64
	ino       uint64
	closeOnce sync.Once
	name      string
}

func getHandle(fn string) (*handle, error) {
	f, err := os.OpenFile(fn, O_PATH, 0)
	if err != nil {
		return nil, errors.Wrapf(err, "failed to open %v with O_PATH", fn)
	}

	var stat syscall.Stat_t
	if err := syscall.Fstat(int(f.Fd()), &stat); err != nil {
		f.Close()
		return nil, errors.Wrapf(err, "failed to stat handle %v", f.Fd())
	}

	h := &handle{
		f:    f,
		name: fn,
		dev:  stat.Dev,
		ino:  stat.Ino,
	}

	// check /proc just in case
	if _, err := os.Stat(h.procPath()); err != nil {
		f.Close()
		return nil, errors.Wrapf(err, "couldn't stat %v", h.procPath())
	}

	return h, nil
}

func (h *handle) procPath() string {
	return fmt.Sprintf("/proc/self/fd/%d", h.f.Fd())
}

func (h *handle) Name() string {
	return h.name
}

func (h *handle) Path() (string, error) {
	var stat syscall.Stat_t
	if err := syscall.Stat(h.procPath(), &stat); err != nil {
		return "", errors.Wrapf(err, "path %v could not be statted", h.procPath())
	}
	if stat.Dev != h.dev || stat.Ino != h.ino {
		return "", errors.Errorf("failed to verify handle %v/%v %v/%v", stat.Dev, h.dev, stat.Ino, h.ino)
	}
	return h.procPath(), nil
}

func (h *handle) Close() error {
	h.closeOnce.Do(func() {
		h.f.Close()
	})
	return nil
}
49 vendor/github.com/tonistiigi/fifo/handle_nolinux.go generated vendored Normal file
@ -0,0 +1,49 @@
// +build !linux

package fifo

import (
	"syscall"

	"github.com/pkg/errors"
)

type handle struct {
	fn  string
	dev uint64
	ino uint64
}

func getHandle(fn string) (*handle, error) {
	var stat syscall.Stat_t
	if err := syscall.Stat(fn, &stat); err != nil {
		return nil, errors.Wrapf(err, "failed to stat %v", fn)
	}

	h := &handle{
		fn:  fn,
		dev: uint64(stat.Dev),
		ino: stat.Ino,
	}

	return h, nil
}

func (h *handle) Path() (string, error) {
	var stat syscall.Stat_t
	if err := syscall.Stat(h.fn, &stat); err != nil {
		return "", errors.Wrapf(err, "path %v could not be statted", h.fn)
	}
	if uint64(stat.Dev) != h.dev || stat.Ino != h.ino {
		return "", errors.Errorf("failed to verify handle %v/%v %v/%v for %v", stat.Dev, h.dev, stat.Ino, h.ino, h.fn)
	}
	return h.fn, nil
}

func (h *handle) Name() string {
	return h.fn
}

func (h *handle) Close() error {
	return nil
}
9 vendor/github.com/tonistiigi/fifo/mkfifo_nosolaris.go generated vendored Normal file
@ -0,0 +1,9 @@
// +build !solaris

package fifo

import "syscall"

func mkfifo(path string, mode uint32) (err error) {
	return syscall.Mkfifo(path, mode)
}
11 vendor/github.com/tonistiigi/fifo/mkfifo_solaris.go generated vendored Normal file
@ -0,0 +1,11 @@
// +build solaris

package fifo

import (
	"golang.org/x/sys/unix"
)

func mkfifo(path string, mode uint32) (err error) {
	return unix.Mkfifo(path, mode)
}
30 vendor/github.com/tonistiigi/fifo/readme.md generated vendored Normal file
@ -0,0 +1,30 @@
### fifo

Go package for handling fifos in a sane way.

```
// OpenFifo opens a fifo. Returns io.ReadWriteCloser.
// Context can be used to cancel this function until open(2) has not returned.
// Accepted flags:
// - syscall.O_CREAT - create new fifo if one doesn't exist
// - syscall.O_RDONLY - open fifo only from reader side
// - syscall.O_WRONLY - open fifo only from writer side
// - syscall.O_RDWR - open fifo from both sides, never block on syscall level
// - syscall.O_NONBLOCK - return io.ReadWriteCloser even if other side of the
//   fifo isn't open. read/write will be connected after the actual fifo is
//   open or after fifo is closed.
func OpenFifo(ctx context.Context, fn string, flag int, perm os.FileMode) (io.ReadWriteCloser, error)


// Read from a fifo to a byte array.
func (f *fifo) Read(b []byte) (int, error)


// Write from byte array to a fifo.
func (f *fifo) Write(b []byte) (int, error)


// Close the fifo. Next reads/writes will error. This method can also be used
// before open(2) has returned and fifo was never opened.
func (f *fifo) Close() error
```
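Not part of the vendored diff itself — a minimal sketch of the OpenFifo API documented above, assuming a hypothetical fifo path; the write side is opened non-blocking and connects once a reader appears:

```go
package main

import (
	"fmt"
	"syscall"

	"github.com/tonistiigi/fifo"
	"golang.org/x/net/context"
)

func main() {
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	// Open (and create) a fifo for writing without blocking until a reader shows up.
	f, err := fifo.OpenFifo(ctx, "/tmp/example.fifo", syscall.O_WRONLY|syscall.O_CREAT|syscall.O_NONBLOCK, 0700)
	if err != nil {
		fmt.Println("open fifo:", err)
		return
	}
	defer f.Close()

	// The write is connected once the read side of the fifo is opened.
	if _, err := f.Write([]byte("hello\n")); err != nil {
		fmt.Println("write:", err)
	}
}
```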