Create/delete containerd container

Signed-off-by: Lantao Liu <lantaol@google.com>
Lantao Liu 2017-06-15 23:13:48 +00:00
parent 6ca9c65578
commit cb9e104cf1
15 changed files with 1049 additions and 829 deletions

View File

@ -56,6 +56,8 @@ type SandboxMetadata struct {
CreatedAt int64
// NetNS is the network namespace used by the sandbox.
NetNS string
// Pid is the process id of the sandbox.
Pid uint32
}
// SandboxUpdateFunc is the function used to update SandboxMetadata.

View File

@ -42,6 +42,7 @@ func TestSandboxStore(t *testing.T) {
},
CreatedAt: time.Now().UnixNano(),
NetNS: "TestNetNS-1",
Pid: 1001,
},
"2": {
ID: "2",
@ -56,6 +57,7 @@ func TestSandboxStore(t *testing.T) {
},
CreatedAt: time.Now().UnixNano(),
NetNS: "TestNetNS-2",
Pid: 1002,
},
"3": {
ID: "3",
@ -70,6 +72,7 @@ func TestSandboxStore(t *testing.T) {
},
CreatedAt: time.Now().UnixNano(),
NetNS: "TestNetNS-3",
Pid: 1003,
},
}
assert := assertlib.New(t)

View File

@ -17,10 +17,18 @@ limitations under the License.
package server
import (
"encoding/json"
"fmt"
"strings"
"time"
"github.com/containerd/containerd/api/services/containers"
prototypes "github.com/gogo/protobuf/types"
"github.com/golang/glog"
imagespec "github.com/opencontainers/image-spec/specs-go/v1"
"github.com/opencontainers/runc/libcontainer/devices"
runtimespec "github.com/opencontainers/runtime-spec/specs-go"
"github.com/opencontainers/runtime-tools/generate"
"golang.org/x/net/context"
runtime "k8s.io/kubernetes/pkg/kubelet/apis/cri/v1alpha1"
@ -43,6 +51,7 @@ func (c *criContainerdService) CreateContainer(ctx context.Context, r *runtime.C
if err != nil {
return nil, fmt.Errorf("failed to find sandbox id %q: %v", r.GetPodSandboxId(), err)
}
sandboxID := sandbox.ID
// Generate unique id and name for the container and reserve the name.
// Reserve the container name to avoid concurrent `CreateContainer` request creating
@ -63,7 +72,7 @@ func (c *criContainerdService) CreateContainer(ctx context.Context, r *runtime.C
meta := metadata.ContainerMetadata{
ID: id,
Name: name,
SandboxID: sandboxID,
Config: config,
}
@ -77,6 +86,20 @@ func (c *criContainerdService) CreateContainer(ctx context.Context, r *runtime.C
if imageMeta == nil {
return nil, fmt.Errorf("image %q not found", image)
}
// Generate container runtime spec.
mounts := c.generateContainerMounts(getSandboxRootDir(c.rootDir, sandboxID), config)
spec, err := c.generateContainerSpec(id, sandbox.Pid, config, sandboxConfig, imageMeta.Config, mounts)
if err != nil {
return nil, fmt.Errorf("failed to generate container %q spec: %v", id, err)
}
rawSpec, err := json.Marshal(spec)
if err != nil {
return nil, fmt.Errorf("failed to marshal oci spec %+v: %v", spec, err)
}
glog.V(4).Infof("Container spec: %+v", spec)
// Prepare container rootfs.
if config.GetLinux().GetSecurityContext().GetReadonlyRootfs() {
if _, err := c.snapshotService.View(ctx, id, imageMeta.ChainID); err != nil {
return nil, fmt.Errorf("failed to view container rootfs %q: %v", imageMeta.ChainID, err)
@ -111,6 +134,30 @@ func (c *criContainerdService) CreateContainer(ctx context.Context, r *runtime.C
}
}()
// Create containerd container.
if _, err = c.containerService.Create(ctx, &containers.CreateContainerRequest{
Container: containers.Container{
ID: id,
// TODO(random-liu): Checkpoint metadata into container labels.
Image: imageMeta.ID,
Runtime: defaultRuntime,
Spec: &prototypes.Any{
TypeUrl: runtimespec.Version,
Value: rawSpec,
},
RootFS: id,
},
}); err != nil {
return nil, fmt.Errorf("failed to create containerd container: %v", err)
}
defer func() {
if retErr != nil {
if _, err := c.containerService.Delete(ctx, &containers.DeleteContainerRequest{ID: id}); err != nil {
glog.Errorf("Failed to delete containerd container %q: %v", id, err)
}
}
}()
// Update container CreatedAt.
meta.CreatedAt = time.Now().UnixNano()
// Add container into container store.
@ -121,3 +168,283 @@ func (c *criContainerdService) CreateContainer(ctx context.Context, r *runtime.C
return &runtime.CreateContainerResponse{ContainerId: id}, nil
}
func (c *criContainerdService) generateContainerSpec(id string, sandboxPid uint32, config *runtime.ContainerConfig,
sandboxConfig *runtime.PodSandboxConfig, imageConfig *imagespec.ImageConfig, extraMounts []*runtime.Mount) (*runtimespec.Spec, error) {
// Creates a spec Generator with the default spec.
// TODO(random-liu): [P2] Move container runtime spec generation into a helper function.
g := generate.New()
// Set the relative path to the rootfs of the container from containerd's
// pre-defined directory.
g.SetRootPath(relativeRootfsPath)
if err := setOCIProcessArgs(&g, config, imageConfig); err != nil {
return nil, err
}
if config.GetWorkingDir() != "" {
g.SetProcessCwd(config.GetWorkingDir())
} else if imageConfig.WorkingDir != "" {
g.SetProcessCwd(imageConfig.WorkingDir)
}
// Apply envs from image config first, so that envs from container config
// can override them.
if err := addImageEnvs(&g, imageConfig.Env); err != nil {
return nil, err
}
for _, e := range config.GetEnvs() {
g.AddProcessEnv(e.GetKey(), e.GetValue())
}
// TODO: add setOCIPrivileged group all privileged logic together
securityContext := config.GetLinux().GetSecurityContext()
// Add extra mounts first so that CRI specified mounts can override.
addOCIBindMounts(&g, append(extraMounts, config.GetMounts()...), securityContext.GetPrivileged())
g.SetRootReadonly(securityContext.GetReadonlyRootfs())
if err := addOCIDevices(&g, config.GetDevices(), securityContext.GetPrivileged()); err != nil {
return nil, fmt.Errorf("failed to set devices mapping %+v: %v", config.GetDevices(), err)
}
// TODO(random-liu): [P1] Handle container logging, decorate and redirect to file.
setOCILinuxResource(&g, config.GetLinux().GetResources())
if sandboxConfig.GetLinux().GetCgroupParent() != "" {
cgroupsPath := getCgroupsPath(sandboxConfig.GetLinux().GetCgroupParent(), id)
g.SetLinuxCgroupsPath(cgroupsPath)
}
g.SetProcessTerminal(config.GetTty())
if err := setOCICapabilities(&g, securityContext.GetCapabilities(), securityContext.GetPrivileged()); err != nil {
return nil, fmt.Errorf("failed to set capabilities %+v: %v",
securityContext.GetCapabilities(), err)
}
// Set namespaces, share namespace with sandbox container.
setOCINamespaces(&g, securityContext.GetNamespaceOptions(), sandboxPid)
// TODO(random-liu): [P1] Set selinux options.
// TODO(random-liu): [P1] Set user/username.
supplementalGroups := securityContext.GetSupplementalGroups()
for _, group := range supplementalGroups {
g.AddProcessAdditionalGid(uint32(group))
}
// TODO(random-liu): [P2] Add apparmor and seccomp.
return g.Spec(), nil
}
// generateContainerMounts sets up necessary container mounts including /dev/shm, /etc/hosts
// and /etc/resolv.conf.
func (c *criContainerdService) generateContainerMounts(sandboxRootDir string, config *runtime.ContainerConfig) []*runtime.Mount {
var mounts []*runtime.Mount
securityContext := config.GetLinux().GetSecurityContext()
mounts = append(mounts, &runtime.Mount{
ContainerPath: etcHosts,
HostPath: getSandboxHosts(sandboxRootDir),
Readonly: securityContext.GetReadonlyRootfs(),
})
// Mount sandbox resolv.conf.
// TODO: Need to figure out whether we should always mount it as read-only
mounts = append(mounts, &runtime.Mount{
ContainerPath: resolvConfPath,
HostPath: getResolvPath(sandboxRootDir),
Readonly: securityContext.GetReadonlyRootfs(),
})
sandboxDevShm := getSandboxDevShm(sandboxRootDir)
if securityContext.GetNamespaceOptions().GetHostIpc() {
sandboxDevShm = devShm
}
mounts = append(mounts, &runtime.Mount{
ContainerPath: devShm,
HostPath: sandboxDevShm,
Readonly: false,
})
return mounts
}
// setOCIProcessArgs sets process args. It returns an error if the final arg list
// is empty.
func setOCIProcessArgs(g *generate.Generator, config *runtime.ContainerConfig, imageConfig *imagespec.ImageConfig) error {
command, args := config.GetCommand(), config.GetArgs()
// The following logic is migrated from https://github.com/moby/moby/blob/master/daemon/commit.go
// TODO(random-liu): Clearly define the commands overwrite behavior.
if len(command) == 0 {
if len(args) == 0 {
args = imageConfig.Cmd
}
if command == nil {
command = imageConfig.Entrypoint
}
}
if len(command) == 0 && len(args) == 0 {
return fmt.Errorf("no command specified")
}
g.SetProcessArgs(append(command, args...))
return nil
}
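The command/args precedence above is easy to misread from the nil checks alone: the CRI command always wins, the image entrypoint and cmd only fill in what the CRI config leaves unset, and a non-nil but empty CRI command suppresses the image entrypoint. A minimal standalone sketch of the same resolution rule, using made-up sample values (not part of this change):

```go
package main

import "fmt"

// resolveArgs mirrors setOCIProcessArgs' precedence rule: the CRI command and
// args win; the image entrypoint and cmd only fill in when the CRI fields are
// unset, and a non-nil empty CRI command drops the image entrypoint.
func resolveArgs(command, args, imageEntrypoint, imageCmd []string) []string {
	if len(command) == 0 {
		if len(args) == 0 {
			args = imageCmd
		}
		if command == nil {
			command = imageEntrypoint
		}
	}
	return append(command, args...)
}

func main() {
	// CRI command set: image entrypoint/cmd are ignored.
	fmt.Println(resolveArgs([]string{"a", "b"}, nil, []string{"c"}, []string{"d"})) // [a b]
	// Nothing set on the CRI side: image entrypoint + cmd are used.
	fmt.Println(resolveArgs(nil, nil, []string{"c"}, []string{"d"})) // [c d]
	// Only CRI args set: image entrypoint is kept, image cmd is dropped.
	fmt.Println(resolveArgs(nil, []string{"x"}, []string{"c"}, []string{"d"})) // [c x]
}
```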
// addImageEnvs adds environment variables from image config. It returns an error if
// an invalid environment variable is encountered.
func addImageEnvs(g *generate.Generator, imageEnvs []string) error {
for _, e := range imageEnvs {
kv := strings.Split(e, "=")
if len(kv) != 2 {
return fmt.Errorf("invalid environment variable %q", e)
}
g.AddProcessEnv(kv[0], kv[1])
}
return nil
}
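addImageEnvs accepts only entries that split into exactly one key and one value on "=", so an image env whose value itself contains "=" is rejected. A small, self-contained illustration of that parsing rule (the sample entries are made up):

```go
package main

import (
	"fmt"
	"strings"
)

// parseEnv mirrors addImageEnvs' validation: an entry must split into exactly
// a key and a value on "=".
func parseEnv(e string) (string, string, error) {
	kv := strings.Split(e, "=")
	if len(kv) != 2 {
		return "", "", fmt.Errorf("invalid environment variable %q", e)
	}
	return kv[0], kv[1], nil
}

func main() {
	// "PATH=/usr/bin" parses; "LS_COLORS=rs=0" fails the exactly-one-"=" check.
	for _, e := range []string{"PATH=/usr/bin", "LS_COLORS=rs=0"} {
		k, v, err := parseEnv(e)
		fmt.Println(k, v, err)
	}
}
```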
func clearReadOnly(m *runtimespec.Mount) {
var opt []string
for _, o := range m.Options {
if o != "ro" {
opt = append(opt, o)
}
}
m.Options = opt
}
// addOCIDevices sets device mappings.
func addOCIDevices(g *generate.Generator, devs []*runtime.Device, privileged bool) error {
spec := g.Spec()
if privileged {
hostDevices, err := devices.HostDevices()
if err != nil {
return err
}
for _, hostDevice := range hostDevices {
rd := runtimespec.LinuxDevice{
Path: hostDevice.Path,
Type: string(hostDevice.Type),
Major: hostDevice.Major,
Minor: hostDevice.Minor,
UID: &hostDevice.Uid,
GID: &hostDevice.Gid,
}
g.AddDevice(rd)
}
spec.Linux.Resources.Devices = []runtimespec.LinuxDeviceCgroup{
{
Allow: true,
Access: "rwm",
},
}
return nil
}
for _, device := range devs {
dev, err := devices.DeviceFromPath(device.HostPath, device.Permissions)
if err != nil {
return err
}
rd := runtimespec.LinuxDevice{
Path: device.ContainerPath,
Type: string(dev.Type),
Major: dev.Major,
Minor: dev.Minor,
UID: &dev.Uid,
GID: &dev.Gid,
}
g.AddDevice(rd)
spec.Linux.Resources.Devices = append(spec.Linux.Resources.Devices, runtimespec.LinuxDeviceCgroup{
Allow: true,
Type: string(dev.Type),
Major: &dev.Major,
Minor: &dev.Minor,
Access: dev.Permissions,
})
}
return nil
}
// addOCIBindMounts adds bind mounts.
// TODO(random-liu): Figure out whether we need to change all CRI mounts to readonly when
// rootfs is readonly. (https://github.com/moby/moby/blob/master/daemon/oci_linux.go)
func addOCIBindMounts(g *generate.Generator, mounts []*runtime.Mount, privileged bool) {
// Mount cgroup into the container as readonly, which inherits docker's behavior.
g.AddCgroupsMount("ro") // nolint: errcheck
for _, mount := range mounts {
dst := mount.GetContainerPath()
src := mount.GetHostPath()
options := []string{"rw"}
if mount.GetReadonly() {
options = []string{"ro"}
}
// TODO(random-liu): [P1] Apply selinux label
g.AddBindMount(src, dst, options)
}
if !privileged {
return
}
spec := g.Spec()
// clear readonly for /sys and cgroup
for i, m := range spec.Mounts {
if spec.Mounts[i].Destination == "/sys" && !spec.Root.Readonly {
clearReadOnly(&spec.Mounts[i])
}
if m.Type == "cgroup" {
clearReadOnly(&spec.Mounts[i])
}
}
spec.Linux.ReadonlyPaths = nil
spec.Linux.MaskedPaths = nil
}
// setOCILinuxResource sets container resource limits.
func setOCILinuxResource(g *generate.Generator, resources *runtime.LinuxContainerResources) {
if resources == nil {
return
}
g.SetLinuxResourcesCPUPeriod(uint64(resources.GetCpuPeriod()))
g.SetLinuxResourcesCPUQuota(resources.GetCpuQuota())
g.SetLinuxResourcesCPUShares(uint64(resources.GetCpuShares()))
g.SetLinuxResourcesMemoryLimit(uint64(resources.GetMemoryLimitInBytes()))
g.SetLinuxResourcesOOMScoreAdj(int(resources.GetOomScoreAdj()))
}
// setOCICapabilities adds/drops process capabilities.
func setOCICapabilities(g *generate.Generator, capabilities *runtime.Capability, privileged bool) error {
if privileged {
// Add all capabilities in privileged mode.
g.SetupPrivileged(true)
return nil
}
if capabilities == nil {
return nil
}
// Capabilities in CRI don't have the `CAP_` prefix, so add it.
for _, c := range capabilities.GetAddCapabilities() {
if err := g.AddProcessCapability("CAP_" + c); err != nil {
return err
}
}
for _, c := range capabilities.GetDropCapabilities() {
if err := g.DropProcessCapability("CAP_" + c); err != nil {
return err
}
}
return nil
}
// setOCINamespaces sets namespaces.
func setOCINamespaces(g *generate.Generator, namespaces *runtime.NamespaceOption, sandboxPid uint32) {
g.AddOrReplaceLinuxNamespace(string(runtimespec.NetworkNamespace), getNetworkNamespace(sandboxPid)) // nolint: errcheck
g.AddOrReplaceLinuxNamespace(string(runtimespec.IPCNamespace), getIPCNamespace(sandboxPid)) // nolint: errcheck
g.AddOrReplaceLinuxNamespace(string(runtimespec.UTSNamespace), getUTSNamespace(sandboxPid)) // nolint: errcheck
g.AddOrReplaceLinuxNamespace(string(runtimespec.PIDNamespace), getPIDNamespace(sandboxPid)) // nolint: errcheck
}
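setOCINamespaces relies on getNetworkNamespace, getIPCNamespace, getUTSNamespace and getPIDNamespace, which live elsewhere in the package and are not shown in this diff. Presumably they return proc-filesystem paths for the sandbox process, so the container joins the sandbox's namespaces rather than creating new ones; a rough sketch under that assumption:

```go
package main

import "fmt"

// Hypothetical equivalents of the get*Namespace helpers referenced above (they
// are not part of this diff): assumed to return proc-filesystem namespace
// paths for the sandbox process, which the OCI runtime joins instead of
// creating fresh namespaces.
func netNSPath(pid uint32) string { return fmt.Sprintf("/proc/%d/ns/net", pid) }
func ipcNSPath(pid uint32) string { return fmt.Sprintf("/proc/%d/ns/ipc", pid) }
func utsNSPath(pid uint32) string { return fmt.Sprintf("/proc/%d/ns/uts", pid) }
func pidNSPath(pid uint32) string { return fmt.Sprintf("/proc/%d/ns/pid", pid) }

func main() {
	// For a sandbox whose infra process has pid 4321, the container spec would
	// point its namespaces at paths like these.
	for _, p := range []string{netNSPath(4321), ipcNSPath(4321), utsNSPath(4321), pidNSPath(4321)} {
		fmt.Println(p)
	}
}
```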

View File

@ -17,12 +17,16 @@ limitations under the License.
package server
import (
"encoding/json"
"errors"
"os"
"testing"
"github.com/containerd/containerd/api/services/containers"
snapshotapi "github.com/containerd/containerd/api/services/snapshot"
imagespec "github.com/opencontainers/image-spec/specs-go/v1"
runtimespec "github.com/opencontainers/runtime-spec/specs-go"
"github.com/opencontainers/runtime-tools/generate"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"golang.org/x/net/context"
@ -33,38 +37,428 @@ import (
servertesting "github.com/kubernetes-incubator/cri-containerd/pkg/server/testing"
)
func checkMount(t *testing.T, mounts []runtimespec.Mount, src, dest, typ string,
contains, notcontains []string) {
found := false
for _, m := range mounts {
if m.Source == src && m.Destination == dest {
assert.Equal(t, m.Type, typ)
for _, c := range contains {
assert.Contains(t, m.Options, c)
}
for _, n := range notcontains {
assert.NotContains(t, m.Options, n)
}
found = true
break
}
}
assert.True(t, found, "mount from %q to %q not found", src, dest)
}
func getCreateContainerTestData() (*runtime.ContainerConfig, *runtime.PodSandboxConfig,
*imagespec.ImageConfig, func(*testing.T, string, uint32, *runtimespec.Spec)) {
config := &runtime.ContainerConfig{
Metadata: &runtime.ContainerMetadata{
Name: "test-name",
Attempt: 1,
},
Image: &runtime.ImageSpec{
Image: "sha256:c75bebcdd211f41b3a460c7bf82970ed6c75acaab9cd4c9a4e125b03ca113799",
},
Command: []string{"test", "command"},
Args: []string{"test", "args"},
WorkingDir: "test-cwd",
Envs: []*runtime.KeyValue{
{Key: "k1", Value: "v1"},
{Key: "k2", Value: "v2"},
},
Mounts: []*runtime.Mount{
{
ContainerPath: "container-path-1",
HostPath: "host-path-1",
},
{
ContainerPath: "container-path-2",
HostPath: "host-path-2",
Readonly: true,
},
},
Labels: map[string]string{"a": "b"},
Annotations: map[string]string{"c": "d"},
Linux: &runtime.LinuxContainerConfig{
Resources: &runtime.LinuxContainerResources{
CpuPeriod: 100,
CpuQuota: 200,
CpuShares: 300,
MemoryLimitInBytes: 400,
OomScoreAdj: 500,
},
SecurityContext: &runtime.LinuxContainerSecurityContext{
Capabilities: &runtime.Capability{
AddCapabilities: []string{"SYS_ADMIN"},
DropCapabilities: []string{"CHOWN"},
},
SupplementalGroups: []int64{1111, 2222},
},
},
}
sandboxConfig := &runtime.PodSandboxConfig{
Metadata: &runtime.PodSandboxMetadata{
Name: "test-sandbox-name",
Uid: "test-sandbox-uid",
Namespace: "test-sandbox-ns",
Attempt: 2,
},
Linux: &runtime.LinuxPodSandboxConfig{
CgroupParent: "/test/cgroup/parent",
},
}
imageConfig := &imagespec.ImageConfig{
Env: []string{"ik1=iv1", "ik2=iv2"},
Entrypoint: []string{"/entrypoint"},
Cmd: []string{"cmd"},
WorkingDir: "/workspace",
}
specCheck := func(t *testing.T, id string, sandboxPid uint32, spec *runtimespec.Spec) {
assert.Equal(t, relativeRootfsPath, spec.Root.Path)
assert.Equal(t, []string{"test", "command", "test", "args"}, spec.Process.Args)
assert.Equal(t, "test-cwd", spec.Process.Cwd)
assert.Contains(t, spec.Process.Env, "k1=v1", "k2=v2", "ik1=iv1", "ik2=iv2")
t.Logf("Check cgroups bind mount")
checkMount(t, spec.Mounts, "cgroup", "/sys/fs/cgroup", "cgroup", []string{"ro"}, nil)
t.Logf("Check bind mount")
checkMount(t, spec.Mounts, "host-path-1", "container-path-1", "bind", []string{"rw"}, nil)
checkMount(t, spec.Mounts, "host-path-2", "container-path-2", "bind", []string{"ro"}, nil)
t.Logf("Check resource limits")
assert.EqualValues(t, *spec.Linux.Resources.CPU.Period, 100)
assert.EqualValues(t, *spec.Linux.Resources.CPU.Quota, 200)
assert.EqualValues(t, *spec.Linux.Resources.CPU.Shares, 300)
assert.EqualValues(t, *spec.Linux.Resources.Memory.Limit, 400)
assert.EqualValues(t, *spec.Linux.Resources.OOMScoreAdj, 500)
t.Logf("Check capabilities")
assert.Contains(t, spec.Process.Capabilities.Bounding, "CAP_SYS_ADMIN")
assert.Contains(t, spec.Process.Capabilities.Effective, "CAP_SYS_ADMIN")
assert.Contains(t, spec.Process.Capabilities.Inheritable, "CAP_SYS_ADMIN")
assert.Contains(t, spec.Process.Capabilities.Permitted, "CAP_SYS_ADMIN")
assert.Contains(t, spec.Process.Capabilities.Ambient, "CAP_SYS_ADMIN")
assert.NotContains(t, spec.Process.Capabilities.Bounding, "CAP_CHOWN")
assert.NotContains(t, spec.Process.Capabilities.Effective, "CAP_CHOWN")
assert.NotContains(t, spec.Process.Capabilities.Inheritable, "CAP_CHOWN")
assert.NotContains(t, spec.Process.Capabilities.Permitted, "CAP_CHOWN")
assert.NotContains(t, spec.Process.Capabilities.Ambient, "CAP_CHOWN")
t.Logf("Check supplemental groups")
assert.Contains(t, spec.Process.User.AdditionalGids, uint32(1111))
assert.Contains(t, spec.Process.User.AdditionalGids, uint32(2222))
t.Logf("Check cgroup path")
assert.Equal(t, getCgroupsPath("/test/cgroup/parent", id), spec.Linux.CgroupsPath)
t.Logf("Check namespaces")
assert.Contains(t, spec.Linux.Namespaces, runtimespec.LinuxNamespace{
Type: runtimespec.NetworkNamespace,
Path: getNetworkNamespace(sandboxPid),
})
assert.Contains(t, spec.Linux.Namespaces, runtimespec.LinuxNamespace{
Type: runtimespec.IPCNamespace,
Path: getIPCNamespace(sandboxPid),
})
assert.Contains(t, spec.Linux.Namespaces, runtimespec.LinuxNamespace{
Type: runtimespec.UTSNamespace,
Path: getUTSNamespace(sandboxPid),
})
assert.Contains(t, spec.Linux.Namespaces, runtimespec.LinuxNamespace{
Type: runtimespec.PIDNamespace,
Path: getPIDNamespace(sandboxPid),
})
}
return config, sandboxConfig, imageConfig, specCheck
}
func TestGeneralContainerSpec(t *testing.T) {
testID := "test-id"
testPid := uint32(1234)
config, sandboxConfig, imageConfig, specCheck := getCreateContainerTestData()
c := newTestCRIContainerdService()
spec, err := c.generateContainerSpec(testID, testPid, config, sandboxConfig, imageConfig, nil)
assert.NoError(t, err)
specCheck(t, testID, testPid, spec)
}
func TestContainerSpecTty(t *testing.T) {
testID := "test-id"
testPid := uint32(1234)
config, sandboxConfig, imageConfig, specCheck := getCreateContainerTestData()
c := newTestCRIContainerdService()
for _, tty := range []bool{true, false} {
config.Tty = tty
spec, err := c.generateContainerSpec(testID, testPid, config, sandboxConfig, imageConfig, nil)
assert.NoError(t, err)
specCheck(t, testID, testPid, spec)
assert.Equal(t, tty, spec.Process.Terminal)
}
}
func TestContainerSpecReadonlyRootfs(t *testing.T) {
testID := "test-id"
testPid := uint32(1234)
config, sandboxConfig, imageConfig, specCheck := getCreateContainerTestData()
c := newTestCRIContainerdService()
for _, readonly := range []bool{true, false} {
config.Linux.SecurityContext.ReadonlyRootfs = readonly
spec, err := c.generateContainerSpec(testID, testPid, config, sandboxConfig, imageConfig, nil)
assert.NoError(t, err)
specCheck(t, testID, testPid, spec)
assert.Equal(t, readonly, spec.Root.Readonly)
}
}
func TestContainerSpecWithExtraMounts(t *testing.T) {
testID := "test-id"
testPid := uint32(1234)
config, sandboxConfig, imageConfig, specCheck := getCreateContainerTestData()
c := newTestCRIContainerdService()
mountInConfig := &runtime.Mount{
ContainerPath: "test-container-path",
HostPath: "test-host-path",
Readonly: false,
}
config.Mounts = append(config.Mounts, mountInConfig)
extraMount := &runtime.Mount{
ContainerPath: "test-container-path",
HostPath: "test-host-path-extra",
Readonly: true,
}
spec, err := c.generateContainerSpec(testID, testPid, config, sandboxConfig, imageConfig, []*runtime.Mount{extraMount})
assert.NoError(t, err)
specCheck(t, testID, testPid, spec)
var mounts []runtimespec.Mount
for _, m := range spec.Mounts {
if m.Destination == "test-container-path" {
mounts = append(mounts, m)
}
}
t.Logf("Extra mounts should come first")
require.Len(t, mounts, 2)
assert.Equal(t, "test-host-path-extra", mounts[0].Source)
assert.Contains(t, mounts[0].Options, "ro")
assert.Equal(t, "test-host-path", mounts[1].Source)
assert.Contains(t, mounts[1].Options, "rw")
}
func TestContainerSpecCommand(t *testing.T) {
for desc, test := range map[string]struct {
criEntrypoint []string
criArgs []string
imageEntrypoint []string
imageArgs []string
expected []string
expectErr bool
}{
"should use cri entrypoint if it's specified": {
criEntrypoint: []string{"a", "b"},
imageEntrypoint: []string{"c", "d"},
imageArgs: []string{"e", "f"},
expected: []string{"a", "b"},
},
"should use cri entrypoint if it's specified even if it's empty": {
criEntrypoint: []string{},
criArgs: []string{"a", "b"},
imageEntrypoint: []string{"c", "d"},
imageArgs: []string{"e", "f"},
expected: []string{"a", "b"},
},
"should use cri entrypoint and args if they are specified": {
criEntrypoint: []string{"a", "b"},
criArgs: []string{"c", "d"},
imageEntrypoint: []string{"e", "f"},
imageArgs: []string{"g", "h"},
expected: []string{"a", "b", "c", "d"},
},
"should use image entrypoint if cri entrypoint is not specified": {
criArgs: []string{"a", "b"},
imageEntrypoint: []string{"c", "d"},
imageArgs: []string{"e", "f"},
expected: []string{"c", "d", "a", "b"},
},
"should use image args if both cri entrypoint and args are not specified": {
imageEntrypoint: []string{"c", "d"},
imageArgs: []string{"e", "f"},
expected: []string{"c", "d", "e", "f"},
},
"should return error if both entrypoint and args are empty": {
expectErr: true,
},
} {
config, _, imageConfig, _ := getCreateContainerTestData()
g := generate.New()
config.Command = test.criEntrypoint
config.Args = test.criArgs
imageConfig.Entrypoint = test.imageEntrypoint
imageConfig.Cmd = test.imageArgs
err := setOCIProcessArgs(&g, config, imageConfig)
if test.expectErr {
assert.Error(t, err)
continue
}
assert.NoError(t, err)
assert.Equal(t, test.expected, g.Spec().Process.Args, desc)
}
}
func TestGenerateContainerMounts(t *testing.T) {
testSandboxRootDir := "test-sandbox-root"
for desc, test := range map[string]struct {
securityContext *runtime.LinuxContainerSecurityContext
expectedMounts []*runtime.Mount
}{
"should setup ro mount when rootfs is read-only": {
securityContext: &runtime.LinuxContainerSecurityContext{
ReadonlyRootfs: true,
},
expectedMounts: []*runtime.Mount{
{
ContainerPath: "/etc/hosts",
HostPath: testSandboxRootDir + "/hosts",
Readonly: true,
},
{
ContainerPath: resolvConfPath,
HostPath: testSandboxRootDir + "/resolv.conf",
Readonly: true,
},
{
ContainerPath: "/dev/shm",
HostPath: testSandboxRootDir + "/shm",
Readonly: false,
},
},
},
"should setup rw mount when rootfs is read-write": {
securityContext: &runtime.LinuxContainerSecurityContext{},
expectedMounts: []*runtime.Mount{
{
ContainerPath: "/etc/hosts",
HostPath: testSandboxRootDir + "/hosts",
Readonly: false,
},
{
ContainerPath: resolvConfPath,
HostPath: testSandboxRootDir + "/resolv.conf",
Readonly: false,
},
{
ContainerPath: "/dev/shm",
HostPath: testSandboxRootDir + "/shm",
Readonly: false,
},
},
},
"should use host /dev/shm when host ipc is set": {
securityContext: &runtime.LinuxContainerSecurityContext{
NamespaceOptions: &runtime.NamespaceOption{HostIpc: true},
},
expectedMounts: []*runtime.Mount{
{
ContainerPath: "/etc/hosts",
HostPath: testSandboxRootDir + "/hosts",
Readonly: false,
},
{
ContainerPath: resolvConfPath,
HostPath: testSandboxRootDir + "/resolv.conf",
Readonly: false,
},
{
ContainerPath: "/dev/shm",
HostPath: "/dev/shm",
Readonly: false,
},
},
},
} {
config := &runtime.ContainerConfig{
Metadata: &runtime.ContainerMetadata{
Name: "test-name",
Attempt: 1,
},
Linux: &runtime.LinuxContainerConfig{
SecurityContext: test.securityContext,
},
}
c := newTestCRIContainerdService()
mounts := c.generateContainerMounts(testSandboxRootDir, config)
assert.Equal(t, test.expectedMounts, mounts, desc)
}
}
func TestPrivilegedBindMount(t *testing.T) {
for desc, test := range map[string]struct {
privileged bool
readonlyRootFS bool
expectedSysFSRO bool
expectedCgroupFSRO bool
}{
"sysfs and cgroupfs should mount as 'ro' by default": {
expectedSysFSRO: true,
expectedCgroupFSRO: true,
},
"sysfs and cgroupfs should not mount as 'ro' if privileged": {
privileged: true,
expectedSysFSRO: false,
expectedCgroupFSRO: false,
},
"sysfs should mount as 'ro' if root filrsystem is readonly": {
privileged: true,
readonlyRootFS: true,
expectedSysFSRO: true,
expectedCgroupFSRO: false,
},
} {
t.Logf("TestCase %q", desc)
g := generate.New()
g.SetRootReadonly(test.readonlyRootFS)
addOCIBindMounts(&g, nil, test.privileged)
spec := g.Spec()
if test.expectedSysFSRO {
checkMount(t, spec.Mounts, "sysfs", "/sys", "sysfs", []string{"ro"}, nil)
} else {
checkMount(t, spec.Mounts, "sysfs", "/sys", "sysfs", nil, []string{"ro"})
}
if test.expectedCgroupFSRO {
checkMount(t, spec.Mounts, "cgroup", "/sys/fs/cgroup", "cgroup", []string{"ro"}, nil)
} else {
checkMount(t, spec.Mounts, "cgroup", "/sys/fs/cgroup", "cgroup", nil, []string{"ro"})
}
}
}
func TestCreateContainer(t *testing.T) {
testSandboxID := "test-sandbox-id"
testSandboxPid := uint32(4321)
config, sandboxConfig, imageConfig, specCheck := getCreateContainerTestData()
testSandboxMetadata := &metadata.SandboxMetadata{
ID: testSandboxID,
Name: "test-sandbox-name",
Config: sandboxConfig,
Pid: testSandboxPid,
}
testContainerName := makeContainerName(config.Metadata, sandboxConfig.Metadata)
// Use an image id to avoid image name resolution.
// TODO(random-liu): Change this to image name after we have complete image
// management unit test framework.
testImage := config.GetImage().GetImage()
testChainID := "test-chain-id"
testImageMetadata := metadata.ImageMetadata{
ID: testImage,
ChainID: testChainID,
Config: imageConfig,
}
for desc, test := range map[string]struct {
@ -82,66 +476,46 @@ func TestCreateContainer(t *testing.T) {
expectErr: true,
},
"should return error if name is reserved": {
sandboxMetadata: testSandboxMetadata,
reserveNameErr: true,
expectErr: true,
},
"should return error if fail to create root directory": {
sandboxMetadata: testSandboxMetadata,
createRootDirErr: errors.New("random error"),
expectErr: true,
},
"should return error if image is not pulled": {
sandboxMetadata: testSandboxMetadata,
imageMetadataErr: true,
expectErr: true,
},
"should return error if prepare snapshot fails": {
sandboxMetadata: testSandboxMetadata,
prepareSnapshotErr: errors.New("random error"),
expectErr: true,
},
"should be able to create container successfully": {
sandboxMetadata: testSandboxMetadata,
expectErr: false,
expectMeta: &metadata.ContainerMetadata{
Name: testContainerName,
SandboxID: testSandboxID,
ImageRef: testImage,
Config: config,
},
},
} {
t.Logf("TestCase %q", desc)
c := newTestCRIContainerdService()
fake := c.containerService.(*servertesting.FakeContainersClient)
fakeSnapshotClient := WithFakeSnapshotClient(c)
fakeOS := c.os.(*ostesting.FakeOS)
if test.sandboxMetadata != nil {
assert.NoError(t, c.sandboxStore.Create(*test.sandboxMetadata))
}
if test.reserveNameErr {
assert.NoError(t, c.containerNameIndex.Reserve(testContainerName, "random id"))
}
if !test.imageMetadataErr {
assert.NoError(t, c.imageMetadataStore.Create(testImageMetadata))
@ -166,18 +540,21 @@ func TestCreateContainer(t *testing.T) {
}
resp, err := c.CreateContainer(context.Background(), &runtime.CreateContainerRequest{
PodSandboxId: testSandboxID,
Config: config,
SandboxConfig: sandboxConfig,
})
if test.expectErr {
assert.Error(t, err)
assert.Nil(t, resp)
assert.False(t, rootExists, "root directory should be cleaned up")
if !test.reserveNameErr {
assert.NoError(t, c.containerNameIndex.Reserve(testContainerName, "random id"),
"container name should be released")
}
assert.Empty(t, fakeSnapshotClient.ListMounts(), "snapshot should be cleaned up")
listResp, err := fake.List(context.Background(), &containers.ListContainersRequest{})
assert.NoError(t, err)
assert.Empty(t, listResp.Containers, "containerd container should be cleaned up")
metas, err := c.containerStore.List()
assert.NoError(t, err)
assert.Empty(t, metas, "container metadata should not be created")
@ -196,9 +573,22 @@ func TestCreateContainer(t *testing.T) {
test.expectMeta.CreatedAt = meta.CreatedAt
assert.Equal(t, test.expectMeta, meta, "container metadata should be created")
// Check runtime spec
containersCalls := fake.GetCalledDetails()
require.Len(t, containersCalls, 1)
createOpts, ok := containersCalls[0].Argument.(*containers.CreateContainerRequest)
assert.True(t, ok, "should create containerd container")
assert.Equal(t, id, createOpts.Container.ID, "container id should be correct")
assert.Equal(t, testImage, createOpts.Container.Image, "test image should be correct")
assert.Equal(t, id, createOpts.Container.RootFS, "rootfs should be correct")
spec := &runtimespec.Spec{}
assert.NoError(t, json.Unmarshal(createOpts.Container.Spec.Value, spec))
specCheck(t, id, testSandboxPid, spec)
assert.Equal(t, []string{"prepare"}, fakeSnapshotClient.GetCalledNames(), "prepare should be called") assert.Equal(t, []string{"prepare"}, fakeSnapshotClient.GetCalledNames(), "prepare should be called")
calls := fakeSnapshotClient.GetCalledDetails() snapshotCalls := fakeSnapshotClient.GetCalledDetails()
prepareOpts := calls[0].Argument.(*snapshotapi.PrepareRequest) require.Len(t, snapshotCalls, 1)
prepareOpts := snapshotCalls[0].Argument.(*snapshotapi.PrepareRequest)
assert.Equal(t, &snapshotapi.PrepareRequest{ assert.Equal(t, &snapshotapi.PrepareRequest{
Key: id, Key: id,
Parent: testChainID, Parent: testChainID,

View File

@ -19,6 +19,7 @@ package server
import (
"fmt"
"github.com/containerd/containerd/api/services/containers"
"github.com/containerd/containerd/snapshot"
"github.com/golang/glog"
"golang.org/x/net/context"
@ -84,6 +85,14 @@ func (c *criContainerdService) RemoveContainer(ctx context.Context, r *runtime.R
containerRootDir, err)
}
// Delete containerd container.
if _, err := c.containerService.Delete(ctx, &containers.DeleteContainerRequest{ID: id}); err != nil {
if !isContainerdGRPCNotFoundError(err) {
return nil, fmt.Errorf("failed to delete containerd container %q: %v", id, err)
}
glog.V(5).Infof("Remove called for containerd container %q that does not exist: %v", id, err)
}
// Delete container metadata.
if err := c.containerStore.Delete(id); err != nil {
return nil, fmt.Errorf("failed to delete container metadata for %q: %v", id, err)

View File

@ -21,6 +21,7 @@ import (
"testing" "testing"
"time" "time"
"github.com/containerd/containerd/api/services/containers"
snapshotapi "github.com/containerd/containerd/api/services/snapshot" snapshotapi "github.com/containerd/containerd/api/services/snapshot"
"github.com/containerd/containerd/api/types/mount" "github.com/containerd/containerd/api/types/mount"
"github.com/stretchr/testify/assert" "github.com/stretchr/testify/assert"
@ -100,6 +101,7 @@ func TestRemoveContainer(t *testing.T) {
for desc, test := range map[string]struct {
metadata *metadata.ContainerMetadata
removeSnapshotErr error
deleteContainerErr error
removeDirErr error
expectErr bool
expectUnsetRemoving bool
@ -122,10 +124,11 @@ func TestRemoveContainer(t *testing.T) {
},
expectErr: true,
},
"should not return error if container metadata does not exist": {
metadata: nil,
removeSnapshotErr: servertesting.SnapshotNotExistError,
deleteContainerErr: servertesting.ContainerNotExistError,
expectErr: false,
},
"should not return error if snapshot does not exist": {
metadata: testContainerMetadata,
@ -137,6 +140,16 @@ func TestRemoveContainer(t *testing.T) {
removeSnapshotErr: errors.New("random error"),
expectErr: true,
},
"should not return error if containerd container does not exist": {
metadata: testContainerMetadata,
deleteContainerErr: servertesting.ContainerNotExistError,
expectErr: false,
},
"should return error if delete containerd container fails": {
metadata: testContainerMetadata,
deleteContainerErr: errors.New("random error"),
expectErr: true,
},
"should return error if remove container root fails": { "should return error if remove container root fails": {
metadata: testContainerMetadata, metadata: testContainerMetadata,
removeDirErr: errors.New("random error"), removeDirErr: errors.New("random error"),
@ -150,6 +163,7 @@ func TestRemoveContainer(t *testing.T) {
} {
t.Logf("TestCase %q", desc)
c := newTestCRIContainerdService()
fake := c.containerService.(*servertesting.FakeContainersClient)
fakeSnapshotClient := WithFakeSnapshotClient(c)
fakeOS := c.os.(*ostesting.FakeOS)
if test.metadata != nil {
@ -171,6 +185,14 @@ func TestRemoveContainer(t *testing.T) {
} else {
fakeSnapshotClient.InjectError("remove", test.removeSnapshotErr)
}
if test.deleteContainerErr == nil {
_, err := fake.Create(context.Background(), &containers.CreateContainerRequest{
Container: containers.Container{ID: testID},
})
assert.NoError(t, err)
} else {
fake.InjectError("delete", test.deleteContainerErr)
}
resp, err := c.RemoveContainer(context.Background(), &runtime.RemoveContainerRequest{
ContainerId: testID,
})
@ -198,5 +220,15 @@ func TestRemoveContainer(t *testing.T) {
mountsResp, err := fakeSnapshotClient.Mounts(context.Background(), &snapshotapi.MountsRequest{Key: testID})
assert.Equal(t, servertesting.SnapshotNotExistError, err, "snapshot should be removed")
assert.Nil(t, mountsResp)
getResp, err := fake.Get(context.Background(), &containers.GetContainerRequest{ID: testID})
assert.Equal(t, servertesting.ContainerNotExistError, err, "containerd container should be removed")
assert.Nil(t, getResp)
resp, err = c.RemoveContainer(context.Background(), &runtime.RemoveContainerRequest{
ContainerId: testID,
})
assert.NoError(t, err)
assert.NotNil(t, resp, "remove should be idempotent")
}
}

View File

@ -17,22 +17,16 @@ limitations under the License.
package server
import (
"encoding/json"
"fmt"
"io"
"os"
"path/filepath"
"strings"
"time"
"github.com/containerd/containerd/api/services/execution"
"github.com/containerd/containerd/api/types/mount"
"github.com/containerd/containerd/api/types/task"
"github.com/golang/glog"
imagespec "github.com/opencontainers/image-spec/specs-go/v1"
"github.com/opencontainers/runc/libcontainer/devices"
runtimespec "github.com/opencontainers/runtime-spec/specs-go"
"github.com/opencontainers/runtime-tools/generate"
"golang.org/x/net/context"
runtime "k8s.io/kubernetes/pkg/kubelet/apis/cri/v1alpha1"
@ -112,26 +106,6 @@ func (c *criContainerdService) startContainer(ctx context.Context, id string, me
if sandboxInfo.Task.Status != task.StatusRunning {
return fmt.Errorf("sandbox container %q is not running", sandboxID)
}
sandboxPid := sandboxInfo.Task.Pid
glog.V(2).Infof("Sandbox container %q is running with pid %d", sandboxID, sandboxPid)
// Generate containerd task create options.
imageMeta, err := c.imageMetadataStore.Get(meta.ImageRef)
if err != nil {
return fmt.Errorf("failed to get container image %q: %v", meta.ImageRef, err)
}
mounts := c.generateContainerMounts(getSandboxRootDir(c.rootDir, sandboxID), config)
spec, err := c.generateContainerSpec(id, sandboxPid, config, sandboxConfig, imageMeta.Config, mounts)
if err != nil {
return fmt.Errorf("failed to generate container %q spec: %v", id, err)
}
_, err = json.Marshal(spec)
if err != nil {
return fmt.Errorf("failed to marshal oci spec %+v: %v", spec, err)
}
glog.V(4).Infof("Container spec: %+v", spec)
containerRootDir := getContainerRootDir(c.rootDir, id)
stdin, stdout, stderr := getStreamingPipes(containerRootDir)
@ -153,7 +127,6 @@ func (c *criContainerdService) startContainer(ctx context.Context, id string, me
}
}()
// Redirect the stream to std for now.
// TODO(random-liu): [P1] Support container logging.
// TODO(random-liu): [P1] Support StdinOnce after container logging is added.
if stdinPipe != nil {
go func(w io.WriteCloser) {
@ -223,283 +196,3 @@ func (c *criContainerdService) startContainer(ctx context.Context, id string, me
meta.StartedAt = time.Now().UnixNano()
return nil
}
func (c *criContainerdService) generateContainerSpec(id string, sandboxPid uint32, config *runtime.ContainerConfig,
sandboxConfig *runtime.PodSandboxConfig, imageConfig *imagespec.ImageConfig, extraMounts []*runtime.Mount) (*runtimespec.Spec, error) {
// Creates a spec Generator with the default spec.
// TODO(random-liu): [P2] Move container runtime spec generation into a helper function.
g := generate.New()
// Set the relative path to the rootfs of the container from containerd's
// pre-defined directory.
g.SetRootPath(relativeRootfsPath)
if err := setOCIProcessArgs(&g, config, imageConfig); err != nil {
return nil, err
}
if config.GetWorkingDir() != "" {
g.SetProcessCwd(config.GetWorkingDir())
} else if imageConfig.WorkingDir != "" {
g.SetProcessCwd(imageConfig.WorkingDir)
}
// Apply envs from image config first, so that envs from container config
// can override them.
if err := addImageEnvs(&g, imageConfig.Env); err != nil {
return nil, err
}
for _, e := range config.GetEnvs() {
g.AddProcessEnv(e.GetKey(), e.GetValue())
}
// TODO: add setOCIPrivileged group all privileged logic together
securityContext := config.GetLinux().GetSecurityContext()
// Add extra mounts first so that CRI specified mounts can override.
addOCIBindMounts(&g, append(extraMounts, config.GetMounts()...), securityContext.GetPrivileged())
g.SetRootReadonly(securityContext.GetReadonlyRootfs())
if err := addOCIDevices(&g, config.GetDevices(), securityContext.GetPrivileged()); err != nil {
return nil, fmt.Errorf("failed to set devices mapping %+v: %v", config.GetDevices(), err)
}
// TODO(random-liu): [P1] Handle container logging, decorate and redirect to file.
setOCILinuxResource(&g, config.GetLinux().GetResources())
if sandboxConfig.GetLinux().GetCgroupParent() != "" {
cgroupsPath := getCgroupsPath(sandboxConfig.GetLinux().GetCgroupParent(), id)
g.SetLinuxCgroupsPath(cgroupsPath)
}
g.SetProcessTerminal(config.GetTty())
if err := setOCICapabilities(&g, securityContext.GetCapabilities(), securityContext.GetPrivileged()); err != nil {
return nil, fmt.Errorf("failed to set capabilities %+v: %v",
securityContext.GetCapabilities(), err)
}
// Set namespaces, share namespace with sandbox container.
setOCINamespaces(&g, securityContext.GetNamespaceOptions(), sandboxPid)
// TODO(random-liu): [P1] Set selinux options.
// TODO(random-liu): [P1] Set user/username.
supplementalGroups := securityContext.GetSupplementalGroups()
for _, group := range supplementalGroups {
g.AddProcessAdditionalGid(uint32(group))
}
// TODO(random-liu): [P2] Add apparmor and seccomp.
return g.Spec(), nil
}
// generateContainerMounts sets up necessary container mounts including /dev/shm, /etc/hosts
// and /etc/resolv.conf.
func (c *criContainerdService) generateContainerMounts(sandboxRootDir string, config *runtime.ContainerConfig) []*runtime.Mount {
var mounts []*runtime.Mount
securityContext := config.GetLinux().GetSecurityContext()
mounts = append(mounts, &runtime.Mount{
ContainerPath: etcHosts,
HostPath: getSandboxHosts(sandboxRootDir),
Readonly: securityContext.GetReadonlyRootfs(),
})
// Mount sandbox resolv.conf.
// TODO: Need to figure out whether we should always mount it as read-only
mounts = append(mounts, &runtime.Mount{
ContainerPath: resolvConfPath,
HostPath: getResolvPath(sandboxRootDir),
Readonly: securityContext.GetReadonlyRootfs(),
})
sandboxDevShm := getSandboxDevShm(sandboxRootDir)
if securityContext.GetNamespaceOptions().GetHostIpc() {
sandboxDevShm = devShm
}
mounts = append(mounts, &runtime.Mount{
ContainerPath: devShm,
HostPath: sandboxDevShm,
Readonly: false,
})
return mounts
}
// setOCIProcessArgs sets process args. It returns an error if the final arg list
// is empty.
func setOCIProcessArgs(g *generate.Generator, config *runtime.ContainerConfig, imageConfig *imagespec.ImageConfig) error {
command, args := config.GetCommand(), config.GetArgs()
// The following logic is migrated from https://github.com/moby/moby/blob/master/daemon/commit.go
// TODO(random-liu): Clearly define the commands overwrite behavior.
if len(command) == 0 {
if len(args) == 0 {
args = imageConfig.Cmd
}
if command == nil {
command = imageConfig.Entrypoint
}
}
if len(command) == 0 && len(args) == 0 {
return fmt.Errorf("no command specified")
}
g.SetProcessArgs(append(command, args...))
return nil
}
// addImageEnvs adds environment variables from image config. It returns an error if
// an invalid environment variable is encountered.
func addImageEnvs(g *generate.Generator, imageEnvs []string) error {
for _, e := range imageEnvs {
kv := strings.Split(e, "=")
if len(kv) != 2 {
return fmt.Errorf("invalid environment variable %q", e)
}
g.AddProcessEnv(kv[0], kv[1])
}
return nil
}
func clearReadOnly(m *runtimespec.Mount) {
var opt []string
for _, o := range m.Options {
if o != "ro" {
opt = append(opt, o)
}
}
m.Options = opt
}
// addOCIDevices sets device mappings.
func addOCIDevices(g *generate.Generator, devs []*runtime.Device, privileged bool) error {
spec := g.Spec()
if privileged {
hostDevices, err := devices.HostDevices()
if err != nil {
return err
}
for _, hostDevice := range hostDevices {
rd := runtimespec.LinuxDevice{
Path: hostDevice.Path,
Type: string(hostDevice.Type),
Major: hostDevice.Major,
Minor: hostDevice.Minor,
UID: &hostDevice.Uid,
GID: &hostDevice.Gid,
}
g.AddDevice(rd)
}
spec.Linux.Resources.Devices = []runtimespec.LinuxDeviceCgroup{
{
Allow: true,
Access: "rwm",
},
}
return nil
}
for _, device := range devs {
dev, err := devices.DeviceFromPath(device.HostPath, device.Permissions)
if err != nil {
return err
}
rd := runtimespec.LinuxDevice{
Path: device.ContainerPath,
Type: string(dev.Type),
Major: dev.Major,
Minor: dev.Minor,
UID: &dev.Uid,
GID: &dev.Gid,
}
g.AddDevice(rd)
spec.Linux.Resources.Devices = append(spec.Linux.Resources.Devices, runtimespec.LinuxDeviceCgroup{
Allow: true,
Type: string(dev.Type),
Major: &dev.Major,
Minor: &dev.Minor,
Access: dev.Permissions,
})
}
return nil
}
// addOCIBindMounts adds bind mounts.
// TODO(random-liu): Figure out whether we need to change all CRI mounts to readonly when
// rootfs is readonly. (https://github.com/moby/moby/blob/master/daemon/oci_linux.go)
func addOCIBindMounts(g *generate.Generator, mounts []*runtime.Mount, privileged bool) {
// Mount cgroup into the container as readonly, which inherits docker's behavior.
g.AddCgroupsMount("ro") // nolint: errcheck
for _, mount := range mounts {
dst := mount.GetContainerPath()
src := mount.GetHostPath()
options := []string{"rw"}
if mount.GetReadonly() {
options = []string{"ro"}
}
// TODO(random-liu): [P1] Apply selinux label
g.AddBindMount(src, dst, options)
}
if !privileged {
return
}
spec := g.Spec()
// clear readonly for /sys and cgroup
for i, m := range spec.Mounts {
if spec.Mounts[i].Destination == "/sys" && !spec.Root.Readonly {
clearReadOnly(&spec.Mounts[i])
}
if m.Type == "cgroup" {
clearReadOnly(&spec.Mounts[i])
}
}
spec.Linux.ReadonlyPaths = nil
spec.Linux.MaskedPaths = nil
}
// setOCILinuxResource sets container resource limits.
func setOCILinuxResource(g *generate.Generator, resources *runtime.LinuxContainerResources) {
if resources == nil {
return
}
g.SetLinuxResourcesCPUPeriod(uint64(resources.GetCpuPeriod()))
g.SetLinuxResourcesCPUQuota(resources.GetCpuQuota())
g.SetLinuxResourcesCPUShares(uint64(resources.GetCpuShares()))
g.SetLinuxResourcesMemoryLimit(uint64(resources.GetMemoryLimitInBytes()))
g.SetLinuxResourcesOOMScoreAdj(int(resources.GetOomScoreAdj()))
}
// setOCICapabilities adds/drops process capabilities.
func setOCICapabilities(g *generate.Generator, capabilities *runtime.Capability, privileged bool) error {
if privileged {
// Add all capabilities in privileged mode.
g.SetupPrivileged(true)
return nil
}
if capabilities == nil {
return nil
}
// Capabilities in CRI don't have the `CAP_` prefix, so add it.
for _, c := range capabilities.GetAddCapabilities() {
if err := g.AddProcessCapability("CAP_" + c); err != nil {
return err
}
}
for _, c := range capabilities.GetDropCapabilities() {
if err := g.DropProcessCapability("CAP_" + c); err != nil {
return err
}
}
return nil
}
// setOCINamespaces sets namespaces.
func setOCINamespaces(g *generate.Generator, namespaces *runtime.NamespaceOption, sandboxPid uint32) {
g.AddOrReplaceLinuxNamespace(string(runtimespec.NetworkNamespace), getNetworkNamespace(sandboxPid)) // nolint: errcheck
g.AddOrReplaceLinuxNamespace(string(runtimespec.IPCNamespace), getIPCNamespace(sandboxPid)) // nolint: errcheck
g.AddOrReplaceLinuxNamespace(string(runtimespec.UTSNamespace), getUTSNamespace(sandboxPid)) // nolint: errcheck
g.AddOrReplaceLinuxNamespace(string(runtimespec.PIDNamespace), getPIDNamespace(sandboxPid)) // nolint: errcheck
}

View File

@ -26,9 +26,6 @@ import (
"github.com/containerd/containerd/api/services/execution" "github.com/containerd/containerd/api/services/execution"
"github.com/containerd/containerd/api/types/mount" "github.com/containerd/containerd/api/types/mount"
"github.com/containerd/containerd/api/types/task" "github.com/containerd/containerd/api/types/task"
imagespec "github.com/opencontainers/image-spec/specs-go/v1"
runtimespec "github.com/opencontainers/runtime-spec/specs-go"
"github.com/opencontainers/runtime-tools/generate"
"github.com/stretchr/testify/assert" "github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require" "github.com/stretchr/testify/require"
"golang.org/x/net/context" "golang.org/x/net/context"
@ -39,427 +36,22 @@ import (
servertesting "github.com/kubernetes-incubator/cri-containerd/pkg/server/testing"
)
func checkMount(t *testing.T, mounts []runtimespec.Mount, src, dest, typ string,
contains, notcontains []string) {
found := false
for _, m := range mounts {
if m.Source == src && m.Destination == dest {
assert.Equal(t, m.Type, typ)
for _, c := range contains {
assert.Contains(t, m.Options, c)
}
for _, n := range notcontains {
assert.NotContains(t, m.Options, n)
}
found = true
break
}
}
assert.True(t, found, "mount from %q to %q not found", src, dest)
}
func getStartContainerTestData() (*runtime.ContainerConfig, *runtime.PodSandboxConfig,
*imagespec.ImageConfig, func(*testing.T, string, uint32, *runtimespec.Spec)) {
config := &runtime.ContainerConfig{
Metadata: &runtime.ContainerMetadata{
Name: "test-name",
Attempt: 1,
},
Command: []string{"test", "command"},
Args: []string{"test", "args"},
WorkingDir: "test-cwd",
Envs: []*runtime.KeyValue{
{Key: "k1", Value: "v1"},
{Key: "k2", Value: "v2"},
},
Mounts: []*runtime.Mount{
{
ContainerPath: "container-path-1",
HostPath: "host-path-1",
},
{
ContainerPath: "container-path-2",
HostPath: "host-path-2",
Readonly: true,
},
},
Labels: map[string]string{"a": "b"},
Annotations: map[string]string{"c": "d"},
Linux: &runtime.LinuxContainerConfig{
Resources: &runtime.LinuxContainerResources{
CpuPeriod: 100,
CpuQuota: 200,
CpuShares: 300,
MemoryLimitInBytes: 400,
OomScoreAdj: 500,
},
SecurityContext: &runtime.LinuxContainerSecurityContext{
Capabilities: &runtime.Capability{
AddCapabilities: []string{"SYS_ADMIN"},
DropCapabilities: []string{"CHOWN"},
},
SupplementalGroups: []int64{1111, 2222},
},
},
}
sandboxConfig := &runtime.PodSandboxConfig{
Metadata: &runtime.PodSandboxMetadata{
Name: "test-sandbox-name",
Uid: "test-sandbox-uid",
Namespace: "test-sandbox-ns",
Attempt: 2,
},
Linux: &runtime.LinuxPodSandboxConfig{
CgroupParent: "/test/cgroup/parent",
},
}
imageConfig := &imagespec.ImageConfig{
Env: []string{"ik1=iv1", "ik2=iv2"},
Entrypoint: []string{"/entrypoint"},
Cmd: []string{"cmd"},
WorkingDir: "/workspace",
}
specCheck := func(t *testing.T, id string, sandboxPid uint32, spec *runtimespec.Spec) {
assert.Equal(t, relativeRootfsPath, spec.Root.Path)
assert.Equal(t, []string{"test", "command", "test", "args"}, spec.Process.Args)
assert.Equal(t, "test-cwd", spec.Process.Cwd)
assert.Contains(t, spec.Process.Env, "k1=v1", "k2=v2", "ik1=iv1", "ik2=iv2")
t.Logf("Check cgroups bind mount")
checkMount(t, spec.Mounts, "cgroup", "/sys/fs/cgroup", "cgroup", []string{"ro"}, nil)
t.Logf("Check bind mount")
checkMount(t, spec.Mounts, "host-path-1", "container-path-1", "bind", []string{"rw"}, nil)
checkMount(t, spec.Mounts, "host-path-2", "container-path-2", "bind", []string{"ro"}, nil)
t.Logf("Check resource limits")
assert.EqualValues(t, *spec.Linux.Resources.CPU.Period, 100)
assert.EqualValues(t, *spec.Linux.Resources.CPU.Quota, 200)
assert.EqualValues(t, *spec.Linux.Resources.CPU.Shares, 300)
assert.EqualValues(t, *spec.Linux.Resources.Memory.Limit, 400)
assert.EqualValues(t, *spec.Linux.Resources.OOMScoreAdj, 500)
t.Logf("Check capabilities")
assert.Contains(t, spec.Process.Capabilities.Bounding, "CAP_SYS_ADMIN")
assert.Contains(t, spec.Process.Capabilities.Effective, "CAP_SYS_ADMIN")
assert.Contains(t, spec.Process.Capabilities.Inheritable, "CAP_SYS_ADMIN")
assert.Contains(t, spec.Process.Capabilities.Permitted, "CAP_SYS_ADMIN")
assert.Contains(t, spec.Process.Capabilities.Ambient, "CAP_SYS_ADMIN")
assert.NotContains(t, spec.Process.Capabilities.Bounding, "CAP_CHOWN")
assert.NotContains(t, spec.Process.Capabilities.Effective, "CAP_CHOWN")
assert.NotContains(t, spec.Process.Capabilities.Inheritable, "CAP_CHOWN")
assert.NotContains(t, spec.Process.Capabilities.Permitted, "CAP_CHOWN")
assert.NotContains(t, spec.Process.Capabilities.Ambient, "CAP_CHOWN")
t.Logf("Check supplemental groups")
assert.Contains(t, spec.Process.User.AdditionalGids, uint32(1111))
assert.Contains(t, spec.Process.User.AdditionalGids, uint32(2222))
t.Logf("Check cgroup path")
assert.Equal(t, getCgroupsPath("/test/cgroup/parent", id), spec.Linux.CgroupsPath)
t.Logf("Check namespaces")
assert.Contains(t, spec.Linux.Namespaces, runtimespec.LinuxNamespace{
Type: runtimespec.NetworkNamespace,
Path: getNetworkNamespace(sandboxPid),
})
assert.Contains(t, spec.Linux.Namespaces, runtimespec.LinuxNamespace{
Type: runtimespec.IPCNamespace,
Path: getIPCNamespace(sandboxPid),
})
assert.Contains(t, spec.Linux.Namespaces, runtimespec.LinuxNamespace{
Type: runtimespec.UTSNamespace,
Path: getUTSNamespace(sandboxPid),
})
assert.Contains(t, spec.Linux.Namespaces, runtimespec.LinuxNamespace{
Type: runtimespec.PIDNamespace,
Path: getPIDNamespace(sandboxPid),
})
}
return config, sandboxConfig, imageConfig, specCheck
}
func TestGeneralContainerSpec(t *testing.T) {
testID := "test-id"
testPid := uint32(1234)
config, sandboxConfig, imageConfig, specCheck := getStartContainerTestData()
c := newTestCRIContainerdService()
spec, err := c.generateContainerSpec(testID, testPid, config, sandboxConfig, imageConfig, nil)
assert.NoError(t, err)
specCheck(t, testID, testPid, spec)
}
func TestContainerSpecTty(t *testing.T) {
testID := "test-id"
testPid := uint32(1234)
config, sandboxConfig, imageConfig, specCheck := getStartContainerTestData()
c := newTestCRIContainerdService()
for _, tty := range []bool{true, false} {
config.Tty = tty
spec, err := c.generateContainerSpec(testID, testPid, config, sandboxConfig, imageConfig, nil)
assert.NoError(t, err)
specCheck(t, testID, testPid, spec)
assert.Equal(t, tty, spec.Process.Terminal)
}
}
func TestContainerSpecReadonlyRootfs(t *testing.T) {
testID := "test-id"
testPid := uint32(1234)
config, sandboxConfig, imageConfig, specCheck := getStartContainerTestData()
c := newTestCRIContainerdService()
for _, readonly := range []bool{true, false} {
config.Linux.SecurityContext.ReadonlyRootfs = readonly
spec, err := c.generateContainerSpec(testID, testPid, config, sandboxConfig, imageConfig, nil)
assert.NoError(t, err)
specCheck(t, testID, testPid, spec)
assert.Equal(t, readonly, spec.Root.Readonly)
}
}
func TestContainerSpecWithExtraMounts(t *testing.T) {
testID := "test-id"
testPid := uint32(1234)
config, sandboxConfig, imageConfig, specCheck := getStartContainerTestData()
c := newTestCRIContainerdService()
mountInConfig := &runtime.Mount{
ContainerPath: "test-container-path",
HostPath: "test-host-path",
Readonly: false,
}
config.Mounts = append(config.Mounts, mountInConfig)
extraMount := &runtime.Mount{
ContainerPath: "test-container-path",
HostPath: "test-host-path-extra",
Readonly: true,
}
spec, err := c.generateContainerSpec(testID, testPid, config, sandboxConfig, imageConfig, []*runtime.Mount{extraMount})
assert.NoError(t, err)
specCheck(t, testID, testPid, spec)
var mounts []runtimespec.Mount
for _, m := range spec.Mounts {
if m.Destination == "test-container-path" {
mounts = append(mounts, m)
}
}
t.Logf("Extra mounts should come first")
require.Len(t, mounts, 2)
assert.Equal(t, "test-host-path-extra", mounts[0].Source)
assert.Contains(t, mounts[0].Options, "ro")
assert.Equal(t, "test-host-path", mounts[1].Source)
assert.Contains(t, mounts[1].Options, "rw")
}
func TestContainerSpecCommand(t *testing.T) {
for desc, test := range map[string]struct {
criEntrypoint []string
criArgs []string
imageEntrypoint []string
imageArgs []string
expected []string
expectErr bool
}{
"should use cri entrypoint if it's specified": {
criEntrypoint: []string{"a", "b"},
imageEntrypoint: []string{"c", "d"},
imageArgs: []string{"e", "f"},
expected: []string{"a", "b"},
},
"should use cri entrypoint if it's specified even if it's empty": {
criEntrypoint: []string{},
criArgs: []string{"a", "b"},
imageEntrypoint: []string{"c", "d"},
imageArgs: []string{"e", "f"},
expected: []string{"a", "b"},
},
"should use cri entrypoint and args if they are specified": {
criEntrypoint: []string{"a", "b"},
criArgs: []string{"c", "d"},
imageEntrypoint: []string{"e", "f"},
imageArgs: []string{"g", "h"},
expected: []string{"a", "b", "c", "d"},
},
"should use image entrypoint if cri entrypoint is not specified": {
criArgs: []string{"a", "b"},
imageEntrypoint: []string{"c", "d"},
imageArgs: []string{"e", "f"},
expected: []string{"c", "d", "a", "b"},
},
"should use image args if both cri entrypoint and args are not specified": {
imageEntrypoint: []string{"c", "d"},
imageArgs: []string{"e", "f"},
expected: []string{"c", "d", "e", "f"},
},
"should return error if both entrypoint and args are empty": {
expectErr: true,
},
} {
config, _, imageConfig, _ := getStartContainerTestData()
g := generate.New()
config.Command = test.criEntrypoint
config.Args = test.criArgs
imageConfig.Entrypoint = test.imageEntrypoint
imageConfig.Cmd = test.imageArgs
err := setOCIProcessArgs(&g, config, imageConfig)
if test.expectErr {
assert.Error(t, err)
continue
}
assert.NoError(t, err)
assert.Equal(t, test.expected, g.Spec().Process.Args, desc)
}
}
func TestGenerateContainerMounts(t *testing.T) {
testSandboxRootDir := "test-sandbox-root"
for desc, test := range map[string]struct {
securityContext *runtime.LinuxContainerSecurityContext
expectedMounts []*runtime.Mount
}{
"should setup ro mount when rootfs is read-only": {
securityContext: &runtime.LinuxContainerSecurityContext{
ReadonlyRootfs: true,
},
expectedMounts: []*runtime.Mount{
{
ContainerPath: "/etc/hosts",
HostPath: testSandboxRootDir + "/hosts",
Readonly: true,
},
{
ContainerPath: resolvConfPath,
HostPath: testSandboxRootDir + "/resolv.conf",
Readonly: true,
},
{
ContainerPath: "/dev/shm",
HostPath: testSandboxRootDir + "/shm",
Readonly: false,
},
},
},
"should setup rw mount when rootfs is read-write": {
securityContext: &runtime.LinuxContainerSecurityContext{},
expectedMounts: []*runtime.Mount{
{
ContainerPath: "/etc/hosts",
HostPath: testSandboxRootDir + "/hosts",
Readonly: false,
},
{
ContainerPath: resolvConfPath,
HostPath: testSandboxRootDir + "/resolv.conf",
Readonly: false,
},
{
ContainerPath: "/dev/shm",
HostPath: testSandboxRootDir + "/shm",
Readonly: false,
},
},
},
"should use host /dev/shm when host ipc is set": {
securityContext: &runtime.LinuxContainerSecurityContext{
NamespaceOptions: &runtime.NamespaceOption{HostIpc: true},
},
expectedMounts: []*runtime.Mount{
{
ContainerPath: "/etc/hosts",
HostPath: testSandboxRootDir + "/hosts",
Readonly: false,
},
{
ContainerPath: resolvConfPath,
HostPath: testSandboxRootDir + "/resolv.conf",
Readonly: false,
},
{
ContainerPath: "/dev/shm",
HostPath: "/dev/shm",
Readonly: false,
},
},
},
} {
config := &runtime.ContainerConfig{
Metadata: &runtime.ContainerMetadata{
Name: "test-name",
Attempt: 1,
},
Linux: &runtime.LinuxContainerConfig{
SecurityContext: test.securityContext,
},
}
c := newTestCRIContainerdService()
mounts := c.generateContainerMounts(testSandboxRootDir, config)
assert.Equal(t, test.expectedMounts, mounts, desc)
}
}
func TestPrivilegedBindMount(t *testing.T) {
for desc, test := range map[string]struct {
privileged bool
readonlyRootFS bool
expectedSysFSRO bool
expectedCgroupFSRO bool
}{
"sysfs and cgroupfs should mount as 'ro' by default": {
expectedSysFSRO: true,
expectedCgroupFSRO: true,
},
"sysfs and cgroupfs should not mount as 'ro' if privileged": {
privileged: true,
expectedSysFSRO: false,
expectedCgroupFSRO: false,
},
"sysfs should mount as 'ro' if root filrsystem is readonly": {
privileged: true,
readonlyRootFS: true,
expectedSysFSRO: true,
expectedCgroupFSRO: false,
},
} {
t.Logf("TestCase %q", desc)
g := generate.New()
g.SetRootReadonly(test.readonlyRootFS)
addOCIBindMounts(&g, nil, test.privileged)
spec := g.Spec()
if test.expectedSysFSRO {
checkMount(t, spec.Mounts, "sysfs", "/sys", "sysfs", []string{"ro"}, nil)
} else {
checkMount(t, spec.Mounts, "sysfs", "/sys", "sysfs", nil, []string{"ro"})
}
if test.expectedCgroupFSRO {
checkMount(t, spec.Mounts, "cgroup", "/sys/fs/cgroup", "cgroup", []string{"ro"}, nil)
} else {
checkMount(t, spec.Mounts, "cgroup", "/sys/fs/cgroup", "cgroup", nil, []string{"ro"})
}
}
}
func TestStartContainer(t *testing.T) { func TestStartContainer(t *testing.T) {
testID := "test-id" testID := "test-id"
testSandboxID := "test-sandbox-id" testSandboxID := "test-sandbox-id"
testSandboxPid := uint32(4321)
testImageID := "sha256:c75bebcdd211f41b3a460c7bf82970ed6c75acaab9cd4c9a4e125b03ca113799"
config, sandboxConfig, imageConfig, _ := getStartContainerTestData() // TODO: declare and test specCheck; see below
testMetadata := &metadata.ContainerMetadata{ testMetadata := &metadata.ContainerMetadata{
ID: testID, ID: testID,
Name: "test-name", Name: "test-name",
SandboxID: testSandboxID, SandboxID: testSandboxID,
Config: config,
ImageRef: testImageID,
CreatedAt: time.Now().UnixNano(), CreatedAt: time.Now().UnixNano(),
} }
testSandboxMetadata := &metadata.SandboxMetadata{ testSandboxMetadata := &metadata.SandboxMetadata{
ID: testSandboxID, ID: testSandboxID,
Name: "test-sandbox-name", Name: "test-sandbox-name",
Config: sandboxConfig,
} }
testSandboxContainer := &task.Task{ testSandboxContainer := &task.Task{
ID: testSandboxID, ID: testSandboxID,
Pid: testSandboxPid, Pid: uint32(4321),
Status: task.StatusRunning, Status: task.StatusRunning,
} }
testMounts := []*mount.Mount{{Type: "bind", Source: "test-source"}} testMounts := []*mount.Mount{{Type: "bind", Source: "test-source"}}
@ -467,7 +59,6 @@ func TestStartContainer(t *testing.T) {
containerMetadata *metadata.ContainerMetadata containerMetadata *metadata.ContainerMetadata
sandboxMetadata *metadata.SandboxMetadata sandboxMetadata *metadata.SandboxMetadata
sandboxContainerdContainer *task.Task sandboxContainerdContainer *task.Task
imageMetadataErr bool
snapshotMountsErr bool snapshotMountsErr bool
prepareFIFOErr error prepareFIFOErr error
createContainerErr error createContainerErr error
@ -476,7 +67,7 @@ func TestStartContainer(t *testing.T) {
expectCalls []string expectCalls []string
expectErr bool expectErr bool
}{ }{
"should return error when container does not exist": { "should return error when container metadata does not exist": {
containerMetadata: nil, containerMetadata: nil,
sandboxMetadata: testSandboxMetadata, sandboxMetadata: testSandboxMetadata,
sandboxContainerdContainer: testSandboxContainer, sandboxContainerdContainer: testSandboxContainer,
@ -488,7 +79,6 @@ func TestStartContainer(t *testing.T) {
ID: testID, ID: testID,
Name: "test-name", Name: "test-name",
SandboxID: testSandboxID, SandboxID: testSandboxID,
Config: config,
CreatedAt: time.Now().UnixNano(), CreatedAt: time.Now().UnixNano(),
StartedAt: time.Now().UnixNano(), StartedAt: time.Now().UnixNano(),
}, },
@ -502,7 +92,6 @@ func TestStartContainer(t *testing.T) {
ID: testID, ID: testID,
Name: "test-name", Name: "test-name",
SandboxID: testSandboxID, SandboxID: testSandboxID,
Config: config,
CreatedAt: time.Now().UnixNano(), CreatedAt: time.Now().UnixNano(),
Removing: true, Removing: true,
}, },
@ -524,22 +113,13 @@ func TestStartContainer(t *testing.T) {
sandboxMetadata: testSandboxMetadata, sandboxMetadata: testSandboxMetadata,
sandboxContainerdContainer: &task.Task{ sandboxContainerdContainer: &task.Task{
ID: testSandboxID, ID: testSandboxID,
Pid: testSandboxPid, Pid: uint32(4321),
Status: task.StatusStopped, Status: task.StatusStopped,
}, },
expectStateChange: true, expectStateChange: true,
expectCalls: []string{"info"}, expectCalls: []string{"info"},
expectErr: true, expectErr: true,
}, },
"should return error when image doesn't exist": {
containerMetadata: testMetadata,
sandboxMetadata: testSandboxMetadata,
sandboxContainerdContainer: testSandboxContainer,
imageMetadataErr: true,
expectStateChange: true,
expectCalls: []string{"info"},
expectErr: true,
},
"should return error when snapshot mounts fails": { "should return error when snapshot mounts fails": {
containerMetadata: testMetadata, containerMetadata: testMetadata,
sandboxMetadata: testSandboxMetadata, sandboxMetadata: testSandboxMetadata,
@ -600,12 +180,6 @@ func TestStartContainer(t *testing.T) {
if test.sandboxContainerdContainer != nil { if test.sandboxContainerdContainer != nil {
fake.SetFakeTasks([]task.Task{*test.sandboxContainerdContainer}) fake.SetFakeTasks([]task.Task{*test.sandboxContainerdContainer})
} }
if !test.imageMetadataErr {
assert.NoError(t, c.imageMetadataStore.Create(metadata.ImageMetadata{
ID: testImageID,
Config: imageConfig,
}))
}
if !test.snapshotMountsErr { if !test.snapshotMountsErr {
fakeSnapshotClient.SetFakeMounts(testID, testMounts) fakeSnapshotClient.SetFakeMounts(testID, testMounts)
} }
@ -651,7 +225,7 @@ func TestStartContainer(t *testing.T) {
assert.NotEmpty(t, meta.Message) assert.NotEmpty(t, meta.Message)
_, err := fake.Info(context.Background(), &execution.InfoRequest{ContainerID: testID}) _, err := fake.Info(context.Background(), &execution.InfoRequest{ContainerID: testID})
assert.True(t, isContainerdGRPCNotFoundError(err), assert.True(t, isContainerdGRPCNotFoundError(err),
"containerd task should be cleaned up after when fail to start") "containerd task should be cleaned up when fail to start")
continue continue
} }
t.Logf("container state should be running when start successfully") t.Logf("container state should be running when start successfully")
@ -661,15 +235,16 @@ func TestStartContainer(t *testing.T) {
pid := info.Task.Pid pid := info.Task.Pid
assert.Equal(t, pid, meta.Pid) assert.Equal(t, pid, meta.Pid)
assert.Equal(t, task.StatusRunning, info.Task.Status) assert.Equal(t, task.StatusRunning, info.Task.Status)
// Check runtime spec
calls := fake.GetCalledDetails() calls := fake.GetCalledDetails()
createOpts, ok := calls[1].Argument.(*execution.CreateRequest) createOpts, ok := calls[1].Argument.(*execution.CreateRequest)
assert.True(t, ok, "2nd call should be create") assert.True(t, ok, "2nd call should be create")
assert.Equal(t, testMounts, createOpts.Rootfs, "rootfs mounts should be correct") _, stdout, stderr := getStreamingPipes(getContainerRootDir(c.rootDir, testID))
// TODO(random-liu): Test other create options. assert.Equal(t, &execution.CreateRequest{
// TODO: Need to create the container first; see Create in containerd/containerd/api/services/containers; createOpts no longer contains spec ContainerID: testID,
//spec := &runtimespec.Spec{} Rootfs: testMounts,
//assert.NoError(t, json.Unmarshal(createOpts.Spec.Value, spec)) // TODO(random-liu): Test streaming configurations.
//specCheck(t, testID, testSandboxPid, spec) Stdout: stdout,
Stderr: stderr,
}, createOpts, "create options should be correct")
} }
} }

View File

@ -62,7 +62,7 @@ const (
relativeRootfsPath = "rootfs" relativeRootfsPath = "rootfs"
// defaultRuntime is the runtime to use in containerd. We may support // defaultRuntime is the runtime to use in containerd. We may support
// other runtimes in the future. // other runtimes in the future.
// defaultRuntime = "linux" // TODO defaulRuntime is currently unused defaultRuntime = "linux"
// sandboxesDir contains all sandbox roots. A sandbox root is the running // sandboxesDir contains all sandbox roots. A sandbox root is the running
// directory of the sandbox; all files created for the sandbox will be // directory of the sandbox; all files created for the sandbox will be
// placed under this directory. // placed under this directory.

View File

@ -99,13 +99,11 @@ func (c *criContainerdService) PullImage(ctx context.Context, r *runtime.PullIma
// TODO(random-liu): [P1] Schema 1 image is not supported in containerd now; we need to support // TODO(random-liu): [P1] Schema 1 image is not supported in containerd now; we need to support
// it for backward compatibility. // it for backward compatibility.
cfgDigest, manifestDigest, err := c.pullImage(ctx, image) // TODO(mikebrow): add truncIndex for image id
imageID, manifestDigest, err := c.pullImage(ctx, image)
if err != nil { if err != nil {
return nil, fmt.Errorf("failed to pull image %q: %v", image, err) return nil, fmt.Errorf("failed to pull image %q: %v", image, err)
} }
// Use config digest as imageID to conform to oci image spec.
// TODO(mikebrow): add truncIndex for image id
imageID := cfgDigest.String()
glog.V(4).Infof("Pulled image %q with image id %q, manifest digest %q", image, imageID, manifestDigest) glog.V(4).Infof("Pulled image %q with image id %q, manifest digest %q", image, imageID, manifestDigest)
repoDigest, repoTag := getRepoDigestAndTag(namedRef, manifestDigest) repoDigest, repoTag := getRepoDigestAndTag(namedRef, manifestDigest)
@ -188,7 +186,7 @@ func (r *resourceSet) all() map[string]struct{} {
// The ref should be normalized image reference. // The ref should be normalized image reference.
func (c *criContainerdService) pullImage(ctx context.Context, ref string) ( func (c *criContainerdService) pullImage(ctx context.Context, ref string) (
// TODO(random-liu): Replace with client.Pull. // TODO(random-liu): Replace with client.Pull.
imagedigest.Digest, imagedigest.Digest, error) { string, imagedigest.Digest, error) {
// Resolve the image reference to get descriptor and fetcher. // Resolve the image reference to get descriptor and fetcher.
resolver := docker.NewResolver(docker.ResolverOptions{ resolver := docker.NewResolver(docker.ResolverOptions{
// TODO(random-liu): Add authentication by setting credentials. // TODO(random-liu): Add authentication by setting credentials.
@ -213,15 +211,14 @@ func (c *criContainerdService) pullImage(ctx context.Context, ref string) (
// In the future, containerd will rely on the information in the image store to perform image // In the future, containerd will rely on the information in the image store to perform image
// garbage collection. // garbage collection.
// For now, we simply use it to store and retrieve information required for pulling an image. // For now, we simply use it to store and retrieve information required for pulling an image.
if putErr := c.imageStoreService.Put(ctx, ref, desc); putErr != nil { if err = c.imageStoreService.Put(ctx, ref, desc); err != nil {
return "", "", fmt.Errorf("failed to put image %q desc %v into containerd image store: %v", return "", "", fmt.Errorf("failed to put image %q desc %v into containerd image store: %v",
ref, desc, putErr) ref, desc, err)
} }
// TODO(random-liu): What if following operations fail? Do we need to do cleanup? // Do not clean up if the following operations fail, so as to keep the download resumable.
resources := newResourceSet()
glog.V(4).Infof("Start downloading resources for image %q", ref) glog.V(4).Infof("Start downloading resources for image %q", ref)
resources := newResourceSet()
// Fetch all image resources into content store. // Fetch all image resources into content store.
// Dispatch a handler which will run a sequence of handlers to: // Dispatch a handler which will run a sequence of handlers to:
// 1) track all resources associated using a customized handler; // 1) track all resources associated using a customized handler;
@ -294,7 +291,14 @@ func (c *criContainerdService) pullImage(ctx context.Context, ref string) (
if err != nil { if err != nil {
return "", "", fmt.Errorf("failed to get config descriptor for image %q: %v", ref, err) return "", "", fmt.Errorf("failed to get config descriptor for image %q: %v", ref, err)
} }
return configDesc.Digest, manifestDigest, nil // Use config digest as imageID to conform to oci image spec, and also add image id as
// image reference.
imageID := configDesc.Digest.String()
if err = c.imageStoreService.Put(ctx, imageID, desc); err != nil {
return "", "", fmt.Errorf("failed to put image id %q into containerd image store: %v",
imageID, err)
}
return imageID, manifestDigest, nil
} }
// waitDownloadingPollInterval is the interval to check resource downloading progress. // waitDownloadingPollInterval is the interval to check resource downloading progress.
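Aside (not part of the diff): the image-ID convention used above is simply the digest of the image config blob, so the same image bytes always yield the same ID regardless of which tag or ref was pulled. A minimal, self-contained sketch using the go-digest package (which the imagedigest import in this file presumably aliases); the config JSON below is a stand-in, not a real image config:
package main
import (
    "fmt"
    digest "github.com/opencontainers/go-digest"
)
func main() {
    // Stand-in for the image config blob fetched from the registry.
    configJSON := []byte(`{"architecture":"amd64","os":"linux"}`)
    // The image ID is the sha256 digest of the config blob, e.g. "sha256:...".
    imageID := digest.FromBytes(configJSON).String()
    fmt.Println(imageID)
}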

View File

@ -50,9 +50,8 @@ func (c *criContainerdService) RemoveImage(ctx context.Context, r *runtime.Remov
// return empty without error when image not found. // return empty without error when image not found.
return &runtime.RemoveImageResponse{}, nil return &runtime.RemoveImageResponse{}, nil
} }
// Also include repo digest, because if user pull image with digest, // Include all image references, including RepoTag, RepoDigest and id.
// there will also be a corresponding repo digest reference. for _, ref := range append(append(meta.RepoTags, meta.RepoDigests...), meta.ID) {
for _, ref := range append(meta.RepoTags, meta.RepoDigests...) {
// TODO(random-liu): Containerd should schedule a garbage collection immediately, // TODO(random-liu): Containerd should schedule a garbage collection immediately,
// and we may want to wait for the garbage collection to be over here. // and we may want to wait for the garbage collection to be over here.
err = c.imageStoreService.Delete(ctx, ref) err = c.imageStoreService.Delete(ctx, ref)
@ -61,8 +60,7 @@ func (c *criContainerdService) RemoveImage(ctx context.Context, r *runtime.Remov
} }
return nil, fmt.Errorf("failed to delete image reference %q for image %q: %v", ref, meta.ID, err) return nil, fmt.Errorf("failed to delete image reference %q for image %q: %v", ref, meta.ID, err)
} }
err = c.imageMetadataStore.Delete(meta.ID) if err = c.imageMetadataStore.Delete(meta.ID); err != nil {
if err != nil {
if metadata.IsNotExistError(err) { if metadata.IsNotExistError(err) {
return &runtime.RemoveImageResponse{}, nil return &runtime.RemoveImageResponse{}, nil
} }

View File

@ -187,6 +187,7 @@ func (c *criContainerdService) RunPodSandbox(ctx context.Context, r *runtime.Run
} }
}() }()
meta.Pid = createResp.Pid
meta.NetNS = getNetworkNamespace(createResp.Pid) meta.NetNS = getNetworkNamespace(createResp.Pid)
if !config.GetLinux().GetSecurityContext().GetNamespaceOptions().GetHostNetwork() { if !config.GetLinux().GetSecurityContext().GetNamespaceOptions().GetHostNetwork() {
// Setup network for sandbox. // Setup network for sandbox.

View File

@ -19,6 +19,7 @@ package server
import ( import (
"fmt" "fmt"
"github.com/containerd/containerd/api/services/containers"
contentapi "github.com/containerd/containerd/api/services/content" contentapi "github.com/containerd/containerd/api/services/content"
diffapi "github.com/containerd/containerd/api/services/diff" diffapi "github.com/containerd/containerd/api/services/diff"
"github.com/containerd/containerd/api/services/execution" "github.com/containerd/containerd/api/services/execution"
@ -77,6 +78,8 @@ type criContainerdService struct {
// containerNameIndex stores all container names and makes sure each // containerNameIndex stores all container names and makes sure each
// name is unique. // name is unique.
containerNameIndex *registrar.Registrar containerNameIndex *registrar.Registrar
// containerService is containerd containers client.
containerService containers.ContainersClient
// taskService is containerd tasks client. // taskService is containerd tasks client.
taskService execution.TasksClient taskService execution.TasksClient
// contentStoreService is the containerd content service client. // contentStoreService is the containerd content service client.
@ -114,7 +117,8 @@ func NewCRIContainerdService(conn *grpc.ClientConn, rootDir, networkPluginBinDir
sandboxIDIndex: truncindex.NewTruncIndex(nil), sandboxIDIndex: truncindex.NewTruncIndex(nil),
// TODO(random-liu): Add container id index. // TODO(random-liu): Add container id index.
containerNameIndex: registrar.NewRegistrar(), containerNameIndex: registrar.NewRegistrar(),
taskService: execution.NewTasksClient(conn), containerService: containers.NewContainersClient(conn),
taskService: execution.NewTasksClient(conn),
imageStoreService: imagesservice.NewStoreFromClient(imagesapi.NewImagesClient(conn)), imageStoreService: imagesservice.NewStoreFromClient(imagesapi.NewImagesClient(conn)),
contentStoreService: contentservice.NewStoreFromClient(contentapi.NewContentClient(conn)), contentStoreService: contentservice.NewStoreFromClient(contentapi.NewContentClient(conn)),
snapshotService: snapshotservice.NewSnapshotterFromClient(snapshotapi.NewSnapshotClient(conn)), snapshotService: snapshotservice.NewSnapshotterFromClient(snapshotapi.NewSnapshotClient(conn)),

View File

@ -65,7 +65,8 @@ func newTestCRIContainerdService() *criContainerdService {
sandboxIDIndex: truncindex.NewTruncIndex(nil), sandboxIDIndex: truncindex.NewTruncIndex(nil),
containerStore: metadata.NewContainerStore(store.NewMetadataStore()), containerStore: metadata.NewContainerStore(store.NewMetadataStore()),
containerNameIndex: registrar.NewRegistrar(), containerNameIndex: registrar.NewRegistrar(),
taskService: servertesting.NewFakeExecutionClient(), taskService: servertesting.NewFakeExecutionClient(),
containerService: servertesting.NewFakeContainersClient(),
netPlugin: servertesting.NewFakeCNIPlugin(), netPlugin: servertesting.NewFakeCNIPlugin(),
agentFactory: agentstesting.NewFakeAgentFactory(), agentFactory: agentstesting.NewFakeAgentFactory(),
} }

View File

@ -0,0 +1,181 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package testing
import (
"fmt"
"sync"
"github.com/containerd/containerd/api/services/containers"
googleprotobuf "github.com/golang/protobuf/ptypes/empty"
"golang.org/x/net/context"
"google.golang.org/grpc"
"google.golang.org/grpc/codes"
)
// ContainerNotExistError is the fake error returned when container does not exist.
var ContainerNotExistError = grpc.Errorf(codes.NotFound, "container does not exist")
// FakeContainersClient is a simple fake containers client, so that cri-containerd
// can be run for testing without requiring a real containerd setup.
type FakeContainersClient struct {
sync.Mutex
called []CalledDetail
errors map[string]error
ContainerList map[string]containers.Container
}
var _ containers.ContainersClient = &FakeContainersClient{}
// NewFakeContainersClient creates a FakeContainersClient
func NewFakeContainersClient() *FakeContainersClient {
return &FakeContainersClient{
errors: make(map[string]error),
ContainerList: make(map[string]containers.Container),
}
}
func (f *FakeContainersClient) getError(op string) error {
err, ok := f.errors[op]
if ok {
delete(f.errors, op)
return err
}
return nil
}
// InjectError injects an error for a call.
func (f *FakeContainersClient) InjectError(fn string, err error) {
f.Lock()
defer f.Unlock()
f.errors[fn] = err
}
// InjectErrors injects errors for calls.
func (f *FakeContainersClient) InjectErrors(errs map[string]error) {
f.Lock()
defer f.Unlock()
for fn, err := range errs {
f.errors[fn] = err
}
}
// ClearErrors clears all injected errors.
func (f *FakeContainersClient) ClearErrors() {
f.Lock()
defer f.Unlock()
f.errors = make(map[string]error)
}
func (f *FakeContainersClient) appendCalled(name string, argument interface{}) {
call := CalledDetail{Name: name, Argument: argument}
f.called = append(f.called, call)
}
// GetCalledNames gets the names of the calls made.
func (f *FakeContainersClient) GetCalledNames() []string {
f.Lock()
defer f.Unlock()
names := []string{}
for _, detail := range f.called {
names = append(names, detail.Name)
}
return names
}
// ClearCalls clears all recorded call details.
func (f *FakeContainersClient) ClearCalls() {
f.Lock()
defer f.Unlock()
f.called = []CalledDetail{}
}
// GetCalledDetails gets the detail of each call.
func (f *FakeContainersClient) GetCalledDetails() []CalledDetail {
f.Lock()
defer f.Unlock()
// Copy the list and return.
return append([]CalledDetail{}, f.called...)
}
// Create is a test implementation of containers.Create.
func (f *FakeContainersClient) Create(ctx context.Context, createOpts *containers.CreateContainerRequest, opts ...grpc.CallOption) (*containers.CreateContainerResponse, error) {
f.Lock()
defer f.Unlock()
f.appendCalled("create", createOpts)
if err := f.getError("create"); err != nil {
return nil, err
}
_, ok := f.ContainerList[createOpts.Container.ID]
if ok {
return nil, fmt.Errorf("container already exists")
}
f.ContainerList[createOpts.Container.ID] = createOpts.Container
return &containers.CreateContainerResponse{Container: createOpts.Container}, nil
}
// Delete is a test implementation of containers.Delete
func (f *FakeContainersClient) Delete(ctx context.Context, deleteOpts *containers.DeleteContainerRequest, opts ...grpc.CallOption) (*googleprotobuf.Empty, error) {
f.Lock()
defer f.Unlock()
f.appendCalled("delete", deleteOpts)
if err := f.getError("delete"); err != nil {
return nil, err
}
_, ok := f.ContainerList[deleteOpts.ID]
if !ok {
return nil, ContainerNotExistError
}
delete(f.ContainerList, deleteOpts.ID)
return &googleprotobuf.Empty{}, nil
}
// Get is a test implementation of containers.Get
func (f *FakeContainersClient) Get(ctx context.Context, getOpts *containers.GetContainerRequest, opts ...grpc.CallOption) (*containers.GetContainerResponse, error) {
f.Lock()
defer f.Unlock()
f.appendCalled("get", getOpts)
if err := f.getError("get"); err != nil {
return nil, err
}
c, ok := f.ContainerList[getOpts.ID]
if !ok {
return nil, ContainerNotExistError
}
return &containers.GetContainerResponse{Container: c}, nil
}
// List is a test implementation of containers.List
func (f *FakeContainersClient) List(ctx context.Context, listOpts *containers.ListContainersRequest, opts ...grpc.CallOption) (*containers.ListContainersResponse, error) {
f.Lock()
defer f.Unlock()
f.appendCalled("list", listOpts)
if err := f.getError("list"); err != nil {
return nil, err
}
var cs []containers.Container
for _, c := range f.ContainerList {
cs = append(cs, c)
}
return &containers.ListContainersResponse{Containers: cs}, nil
}
// Update is a test implementation of containers.Update
func (f *FakeContainersClient) Update(ctx context.Context, updateOpts *containers.UpdateContainerRequest, opts ...grpc.CallOption) (*containers.UpdateContainerResponse, error) {
// TODO: implement Update()
return nil, nil
}
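A short usage sketch (illustrative only, not part of the diff) of how a test might drive the fake: create a container, read it back, then inject a one-shot error for the next call. It assumes the function lives in this testing package so the existing imports (containers, context, fmt) are available; the function name, "test-id", and the injected error message are hypothetical.
func exampleFakeContainersClientUsage() {
    fake := NewFakeContainersClient()
    ctx := context.Background()
    // Create a container, then read it back.
    if _, err := fake.Create(ctx, &containers.CreateContainerRequest{
        Container: containers.Container{ID: "test-id"},
    }); err != nil {
        panic(err) // sketch only
    }
    resp, err := fake.Get(ctx, &containers.GetContainerRequest{ID: "test-id"})
    if err != nil {
        panic(err) // sketch only
    }
    fmt.Println(resp.Container.ID) // "test-id"
    // Inject an error that is returned (and consumed) by the next "delete" call.
    fake.InjectError("delete", fmt.Errorf("injected failure"))
    if _, err := fake.Delete(ctx, &containers.DeleteContainerRequest{ID: "test-id"}); err != nil {
        fmt.Println(err) // the injected error; the call is still recorded
    }
    fmt.Println(fake.GetCalledNames()) // [create get delete]
}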