Signed-off-by: Lantao Liu <lantaol@google.com>
Lantao Liu
2018-09-17 11:37:24 -07:00
parent 0dc7636c0b
commit ab9942cbf9
138 changed files with 7052 additions and 4527 deletions

View File

@@ -101,7 +101,7 @@ make BUILD_TAGS='seccomp apparmor'
| selinux | selinux process and mount labeling | <none> |
| apparmor | apparmor profile support | <none> |
### Validate Your `cri` Setup
A Kubernetes incubator project called [cri-tools](https://github.com/kubernetes-incubator/cri-tools)
A Kubernetes incubator project called [cri-tools](https://github.com/kubernetes-sigs/cri-tools)
includes programs for exercising CRI implementations such as the `cri` plugin.
More importantly, cri-tools includes the program `critest` which is used for running
[CRI Validation Testing](https://github.com/kubernetes/community/blob/master/contributors/devel/cri-validation.md).

View File

@@ -71,6 +71,10 @@ func initCRIService(ic *plugin.InitContext) (interface{}, error) {
}
log.G(ctx).Infof("Start cri plugin with config %+v", c)
if err := validateConfig(&c); err != nil {
return nil, errors.Wrap(err, "invalid config")
}
if err := setGLogLevel(); err != nil {
return nil, errors.Wrap(err, "failed to set glog level")
}
@@ -104,6 +108,18 @@ func initCRIService(ic *plugin.InitContext) (interface{}, error) {
return s, nil
}
// validateConfig validates the given configuration.
func validateConfig(c *criconfig.Config) error {
// It is an error to provide both an UntrustedWorkloadRuntime & define an 'untrusted' runtime.
if _, ok := c.ContainerdConfig.Runtimes[criconfig.RuntimeUntrusted]; ok {
if c.ContainerdConfig.UntrustedWorkloadRuntime.Type != "" {
return errors.New("conflicting definitions: configuration includes untrusted_workload_runtime and runtimes['untrusted']")
}
}
return nil
}
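For context, a minimal test-style sketch of the configuration this new check rejects (assumed to live in the same package as validateConfig and to reuse its existing criconfig and testing imports; the runtime type string is an example value, not taken from the commit):

```go
func TestConflictingUntrustedRuntime(t *testing.T) {
	c := criconfig.Config{}
	// Deprecated field and the new 'untrusted' handler set at the same time.
	c.ContainerdConfig.UntrustedWorkloadRuntime = criconfig.Runtime{Type: "io.containerd.runtime.v1.linux"}
	c.ContainerdConfig.Runtimes = map[string]criconfig.Runtime{
		criconfig.RuntimeUntrusted: {Type: "io.containerd.runtime.v1.linux"},
	}
	if err := validateConfig(&c); err == nil {
		t.Fatal("expected conflicting-definitions error, got nil")
	}
}
```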
// getServicesOpts get service options from plugin context.
func getServicesOpts(ic *plugin.InitContext) ([]containerd.ServicesOpt, error) {
plugins, err := ic.GetByType(plugin.ServicePlugin)

View File

@@ -33,10 +33,18 @@ type Runtime struct {
type ContainerdConfig struct {
// Snapshotter is the snapshotter used by containerd.
Snapshotter string `toml:"snapshotter" json:"snapshotter"`
// DefaultRuntime is the runtime to use in containerd.
// DefaultRuntime is the default runtime to use in containerd.
// This runtime is used when no runtime handler (or the empty string) is provided.
DefaultRuntime Runtime `toml:"default_runtime" json:"defaultRuntime"`
// UntrustedWorkloadRuntime is a runtime used to run untrusted workloads.
// DEPRECATED: use Runtimes instead. If provided, this runtime is mapped to the runtime handler
// named 'untrusted'. It is a configuration error to provide both the (now deprecated)
// UntrustedWorkloadRuntime and a handler in the Runtimes handler map (below) for 'untrusted'
// workloads at the same time. Please provide one or the other.
UntrustedWorkloadRuntime Runtime `toml:"untrusted_workload_runtime" json:"untrustedWorkloadRuntime"`
// Runtimes is a map from CRI RuntimeHandler strings, which specify types of runtime
// configurations, to the matching configurations.
Runtimes map[string]Runtime `toml:"runtimes" json:"runtimes"`
// NoPivot disables pivot-root (linux only), required when running a container in a RamDisk with runc
NoPivot bool `toml:"no_pivot" json:"noPivot"`
}
@@ -114,12 +122,22 @@ type PluginConfig struct {
SystemdCgroup bool `toml:"systemd_cgroup" json:"systemdCgroup"`
// EnableTLSStreaming indicates to enable the TLS streaming support.
EnableTLSStreaming bool `toml:"enable_tls_streaming" json:"enableTLSStreaming"`
// X509KeyPairStreaming is a x509 key pair used for TLS streaming
X509KeyPairStreaming `toml:"x509_key_pair_streaming" json:"x509KeyPairStreaming"`
// MaxContainerLogLineSize is the maximum log line size in bytes for a container.
// Log lines longer than the limit will be split into multiple lines. A non-positive
// value means no limit.
MaxContainerLogLineSize int `toml:"max_container_log_line_size" json:"maxContainerLogSize"`
}
// X509KeyPairStreaming contains the x509 configuration for streaming
type X509KeyPairStreaming struct {
// TLSCertFile is the path to a certificate file
TLSCertFile string `toml:"tls_cert_file" json:"tlsCertFile"`
// TLSKeyFile is the path to a private key file
TLSKeyFile string `toml:"tls_key_file" json:"tlsKeyFile"`
}
// Config contains all configurations for cri server.
type Config struct {
// PluginConfig is the config for CRI plugin.
@@ -152,10 +170,14 @@ func DefaultConfig() PluginConfig {
},
NoPivot: false,
},
StreamServerAddress: "127.0.0.1",
StreamServerPort: "0",
EnableSelinux: false,
EnableTLSStreaming: false,
StreamServerAddress: "127.0.0.1",
StreamServerPort: "0",
EnableSelinux: false,
EnableTLSStreaming: false,
X509KeyPairStreaming: X509KeyPairStreaming{
TLSKeyFile: "",
TLSCertFile: "",
},
SandboxImage: "k8s.gcr.io/pause:3.1",
StatsCollectPeriod: 10,
SystemdCgroup: false,
@@ -169,3 +191,8 @@ func DefaultConfig() PluginConfig {
},
}
}
const (
// RuntimeUntrusted is the implicit runtime defined for ContainerdConfig.UntrustedWorkloadRuntime
RuntimeUntrusted = "untrusted"
)

View File

@@ -87,13 +87,33 @@ type imageConfig struct {
img ocispec.Image
}
type importConfig struct {
unpack bool
snapshotter string
}
// ImportOption configures import behavior.
type ImportOption func(*importConfig)
// WithUnpack is used to unpack image after import.
func WithUnpack(snapshotter string) ImportOption {
return func(c *importConfig) {
c.unpack = true
c.snapshotter = snapshotter
}
}
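A minimal caller sketch for the new option (hypothetical: it assumes a context, a *containerd.Client named client, and the usual os/log imports are in scope; the tarball path and the "overlayfs" snapshotter are made-up example values):

```go
f, err := os.Open("/tmp/busybox.tar") // hypothetical tarball path
if err != nil {
	return err
}
defer f.Close()
// Import the tarball and unpack the imported image with the given snapshotter.
refs, err := importer.Import(ctx, client, f, importer.WithUnpack("overlayfs"))
if err != nil {
	return err
}
log.Printf("imported references: %v", refs)
```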
// Import implements Docker Image Spec v1.1.
// An image MUST have `manifest.json`.
// `repositories` file in Docker Image Spec v1.0 is not supported (yet).
// Also, the current implementation assumes the implicit file name convention,
// which is not explicitly documented in the spec. (e.g. foobar/layer.tar)
// It returns a group of image references successfully loaded.
func Import(ctx context.Context, client *containerd.Client, reader io.Reader) (_ []string, retErr error) {
func Import(ctx context.Context, client *containerd.Client, reader io.Reader, opts ...ImportOption) (_ []string, retErr error) {
c := &importConfig{}
for _, o := range opts {
o(c)
}
ctx, done, err := client.WithLease(ctx)
if err != nil {
return nil, err
@@ -209,6 +229,12 @@ func Import(ctx context.Context, client *containerd.Client, reader io.Reader) (_
Name: ref,
Target: *desc,
}
if c.unpack {
img := containerd.NewImage(client, imgrec)
if err := img.Unpack(ctx, c.snapshotter); err != nil {
return refs, errors.Wrapf(err, "unpack image %q", ref)
}
}
if _, err := is.Create(ctx, imgrec); err != nil {
if !errdefs.IsAlreadyExists(err) {
return refs, errors.Wrapf(err, "create image ref %+v", imgrec)

View File

@@ -0,0 +1,51 @@
/*
Copyright 2018 The containerd Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package opts
import (
"context"
"github.com/containerd/containerd/containers"
"github.com/containerd/containerd/oci"
runtimespec "github.com/opencontainers/runtime-spec/specs-go"
)
// WithAdditionalGIDs adds any additional groups listed for a particular user in the
// /etc/groups file of the image's root filesystem to the OCI spec's additionalGids array.
func WithAdditionalGIDs(userstr string) oci.SpecOpts {
return func(ctx context.Context, client oci.Client, c *containers.Container, s *runtimespec.Spec) (err error) {
gids := s.Process.User.AdditionalGids
if err := oci.WithAdditionalGIDs(userstr)(ctx, client, c, s); err != nil {
return err
}
// Merge existing gids and new gids.
s.Process.User.AdditionalGids = mergeGids(s.Process.User.AdditionalGids, gids)
return nil
}
}
func mergeGids(gids1, gids2 []uint32) []uint32 {
for _, gid1 := range gids1 {
for i, gid2 := range gids2 {
if gid1 == gid2 {
gids2 = append(gids2[:i], gids2[i+1:]...)
break
}
}
}
return append(gids1, gids2...)
}
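A quick worked example of the merge behavior, with made-up gid values:

```go
gids := mergeGids([]uint32{0, 10}, []uint32{10, 20})
fmt.Println(gids) // [0 10 20]: a gid present in both inputs appears only once
```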

View File

@@ -43,6 +43,7 @@ type OS interface {
Mount(source string, target string, fstype string, flags uintptr, data string) error
Unmount(target string) error
LookupMount(path string) (mount.Info, error)
Hostname() (string, error)
}
// RealOS is used to dispatch the real system level operations.
@@ -134,3 +135,8 @@ func Unmount(target string) error {
return err
}
// Hostname will call os.Hostname to get the hostname of the host.
func (RealOS) Hostname() (string, error) {
return os.Hostname()
}

View File

@@ -19,6 +19,7 @@ package server
import (
"os"
"path/filepath"
"sort"
"strconv"
"strings"
"time"
@@ -114,13 +115,9 @@ func (c *criService) CreateContainer(ctx context.Context, r *runtime.CreateConta
// Prepare container image snapshot. For container, the image should have
// been pulled before creating the container, so do not ensure the image.
imageRef := config.GetImage().GetImage()
image, err := c.localResolve(ctx, imageRef)
image, err := c.localResolve(config.GetImage().GetImage())
if err != nil {
return nil, errors.Wrapf(err, "failed to resolve image %q", imageRef)
}
if image == nil {
return nil, errors.Errorf("image %q not found", imageRef)
return nil, errors.Wrapf(err, "failed to resolve image %q", config.GetImage().GetImage())
}
// Run container using the same runtime with sandbox.
@@ -232,6 +229,15 @@ func (c *criService) CreateContainer(ctx context.Context, r *runtime.CreateConta
specOpts = append(specOpts, oci.WithUser(userstr))
}
if securityContext.GetRunAsUsername() != "" {
userstr = securityContext.GetRunAsUsername()
} else {
// Even if RunAsUser is not set, we still call `GetValue` to get uid 0.
// Because it is still useful to get additional gids for uid 0.
userstr = strconv.FormatInt(securityContext.GetRunAsUser().GetValue(), 10)
}
specOpts = append(specOpts, customopts.WithAdditionalGIDs(userstr))
apparmorSpecOpts, err := generateApparmorSpecOpts(
securityContext.GetApparmorProfile(),
securityContext.GetPrivileged(),
@@ -333,6 +339,17 @@ func (c *criService) generateContainerSpec(id string, sandboxID string, sandboxP
g.AddProcessEnv("TERM", "xterm")
}
// Add HOSTNAME env.
hostname := sandboxConfig.GetHostname()
if sandboxConfig.GetLinux().GetSecurityContext().GetNamespaceOptions().GetNetwork() == runtime.NamespaceMode_NODE &&
hostname == "" {
hostname, err = c.os.Hostname()
if err != nil {
return nil, err
}
}
g.AddProcessEnv(hostnameEnv, hostname)
// Apply envs from image config first, so that envs from container config
// can override them.
if err := addImageEnvs(&g, imageConfig.Env); err != nil {
@@ -349,12 +366,30 @@ func (c *criService) generateContainerSpec(id string, sandboxID string, sandboxP
return nil, errors.Wrapf(err, "failed to init selinux options %+v", securityContext.GetSelinuxOptions())
}
// Add extra mounts first so that CRI specified mounts can override.
mounts := append(extraMounts, config.GetMounts()...)
// Merge extra mounts and CRI mounts.
mounts := mergeMounts(config.GetMounts(), extraMounts)
if err := c.addOCIBindMounts(&g, mounts, mountLabel); err != nil {
return nil, errors.Wrapf(err, "failed to set OCI bind mounts %+v", mounts)
}
// Apply masked paths if specified.
// When `MaskedPaths` is not specified, keep runtime default for backward compatibility;
// When `MaskedPaths` is specified, but length is zero, clear masked path list.
if securityContext.GetMaskedPaths() != nil {
g.Config.Linux.MaskedPaths = nil
for _, path := range securityContext.GetMaskedPaths() {
g.AddLinuxMaskedPaths(path)
}
}
// Apply readonly paths if specified.
if securityContext.GetReadonlyPaths() != nil {
g.Config.Linux.ReadonlyPaths = nil
for _, path := range securityContext.GetReadonlyPaths() {
g.AddLinuxReadonlyPaths(path)
}
}
if securityContext.GetPrivileged() {
if !sandboxConfig.GetLinux().GetSecurityContext().GetPrivileged() {
return nil, errors.New("no privileged container allowed in sandbox")
@@ -596,6 +631,10 @@ func setOCIDevicesPrivileged(g *generate.Generator) error {
// addOCIBindMounts adds bind mounts.
func (c *criService) addOCIBindMounts(g *generate.Generator, mounts []*runtime.Mount, mountLabel string) error {
// Sort mounts in number of parts. This ensures that high level mounts don't
// shadow other mounts.
sort.Sort(orderedMounts(mounts))
// Mount cgroup into the container as readonly, which inherits docker's behavior.
g.AddMount(runtimespec.Mount{
Source: "cgroup",
@@ -603,6 +642,29 @@ func (c *criService) addOCIBindMounts(g *generate.Generator, mounts []*runtime.M
Type: "cgroup",
Options: []string{"nosuid", "noexec", "nodev", "relatime", "ro"},
})
// Copy all mounts from default mounts, except for
// - mounts overridden by supplied mount;
// - all mounts under /dev if a supplied /dev is present.
mountSet := make(map[string]struct{})
for _, m := range mounts {
mountSet[filepath.Clean(m.ContainerPath)] = struct{}{}
}
defaultMounts := g.Mounts()
g.ClearMounts()
for _, m := range defaultMounts {
dst := filepath.Clean(m.Destination)
if _, ok := mountSet[dst]; ok {
// filter out mount overridden by a supplied mount
continue
}
if _, mountDev := mountSet["/dev"]; mountDev && strings.HasPrefix(dst, "/dev/") {
// filter out everything under /dev if /dev is a supplied mount
continue
}
g.AddMount(m)
}
for _, mount := range mounts {
dst := mount.GetContainerPath()
src := mount.GetHostPath()
@@ -821,10 +883,6 @@ func defaultRuntimeSpec(id string) (*runtimespec.Spec, error) {
if mount.Destination == "/run" {
continue
}
// CRI plugin handles `/dev/shm` itself.
if mount.Destination == "/dev/shm" {
continue
}
mounts = append(mounts, mount)
}
spec.Mounts = mounts
@@ -968,3 +1026,25 @@ func generateUserString(username string, uid, gid *runtime.Int64Value) (string,
}
return userstr, nil
}
// mergeMounts merge CRI mounts with extra mounts. If a mount destination
// is mounted by both a CRI mount and an extra mount, the CRI mount will
// be kept.
func mergeMounts(criMounts, extraMounts []*runtime.Mount) []*runtime.Mount {
var mounts []*runtime.Mount
mounts = append(mounts, criMounts...)
// Copy all mounts from extra mounts, except for mounts overridden by CRI.
for _, e := range extraMounts {
found := false
for _, c := range criMounts {
if filepath.Clean(e.ContainerPath) == filepath.Clean(c.ContainerPath) {
found = true
break
}
}
if !found {
mounts = append(mounts, e)
}
}
return mounts
}
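An illustration of the precedence with made-up mounts (all paths below are hypothetical):

```go
cri := []*runtime.Mount{
	{ContainerPath: "/etc/hosts", HostPath: "/var/lib/kubelet-hosts"}, // hypothetical host path
}
extra := []*runtime.Mount{
	{ContainerPath: "/etc/hosts/", HostPath: "/etc/hosts"}, // same destination after filepath.Clean
	{ContainerPath: "/dev/shm", HostPath: "/run/sandbox-shm"},
}
merged := mergeMounts(cri, extra)
// merged destinations: /etc/hosts (from the CRI mount) and /dev/shm (from extra mounts).
```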

View File

@@ -46,14 +46,15 @@ func (c *criService) ContainerStatus(ctx context.Context, r *runtime.ContainerSt
if err != nil {
return nil, errors.Wrapf(err, "failed to get image %q", imageRef)
}
if len(image.RepoTags) > 0 {
repoTags, repoDigests := parseImageReferences(image.References)
if len(repoTags) > 0 {
// Based on current behavior of dockershim, this field should be
// image tag.
spec = &runtime.ImageSpec{Image: image.RepoTags[0]}
spec = &runtime.ImageSpec{Image: repoTags[0]}
}
if len(image.RepoDigests) > 0 {
if len(repoDigests) > 0 {
// Based on the CRI definition, this field will be consumed by user.
imageRef = image.RepoDigests[0]
imageRef = repoDigests[0]
}
status := toCRIContainerStatus(container, spec, imageRef)
info, err := toCRIContainerInfo(ctx, container, r.GetVerbose())
@@ -112,7 +113,6 @@ type containerInfo struct {
}
// toCRIContainerInfo converts internal container object information to CRI container status response info map.
// TODO(random-liu): Return error instead of logging.
func toCRIContainerInfo(ctx context.Context, container containerstore.Container, verbose bool) (map[string]string, error) {
if !verbose {
return nil, nil

View File

@@ -33,7 +33,10 @@ import (
// killContainerTimeout is the timeout that we wait for the container to
// be SIGKILLed.
const killContainerTimeout = 2 * time.Minute
// The timeout is set to 1 min, because the default CRI operation timeout
// for StopContainer is (2 min + stop timeout). Set to 1 min, so that we
// have enough time for kill(all=true) and kill(all=false).
const killContainerTimeout = 1 * time.Minute
// StopContainer stops a running container with a grace period (i.e., timeout).
func (c *criService) StopContainer(ctx context.Context, r *runtime.StopContainerRequest) (*runtime.StopContainerResponse, error) {
@@ -63,6 +66,16 @@ func (c *criService) stopContainer(ctx context.Context, container containerstore
return nil
}
task, err := container.Container.Task(ctx, nil)
if err != nil {
if !errdefs.IsNotFound(err) {
return errors.Wrapf(err, "failed to stop container, task not found for container %q", id)
}
return nil
}
// We only need to kill the task. The event handler will Delete the
// task from containerd after it handles the Exited event.
if timeout > 0 {
stopSignal := unix.SIGTERM
image, err := c.imageStore.Get(container.ImageRef)
@@ -81,52 +94,47 @@ func (c *criService) stopContainer(ctx context.Context, container containerstore
}
}
logrus.Infof("Stop container %q with signal %v", id, stopSignal)
task, err := container.Container.Task(ctx, nil)
if err != nil {
if !errdefs.IsNotFound(err) {
return errors.Wrapf(err, "failed to stop container, task not found for container %q", id)
}
return nil
}
if task != nil {
if err = task.Kill(ctx, stopSignal); err != nil {
if !errdefs.IsNotFound(err) {
return errors.Wrapf(err, "failed to stop container %q", id)
}
// Move on to make sure container status is updated.
}
if err = task.Kill(ctx, stopSignal); err != nil && !errdefs.IsNotFound(err) {
return errors.Wrapf(err, "failed to stop container %q", id)
}
err = c.waitContainerStop(ctx, container, timeout)
if err == nil {
if err = c.waitContainerStop(ctx, container, timeout); err == nil {
return nil
}
logrus.WithError(err).Errorf("Stop container %q timed out", id)
logrus.WithError(err).Errorf("An error occurs during waiting for container %q to be stopped", id)
}
task, err := container.Container.Task(ctx, nil)
if err != nil {
if !errdefs.IsNotFound(err) {
return errors.Wrapf(err, "failed to stop container, task not found for container %q", id)
}
return nil
}
// Event handler will Delete the container from containerd after it handles the Exited event.
logrus.Infof("Kill container %q", id)
if task != nil {
if err = task.Kill(ctx, unix.SIGKILL, containerd.WithKillAll); err != nil {
if !errdefs.IsNotFound(err) {
return errors.Wrapf(err, "failed to kill container %q", id)
}
// Move on to make sure container status is updated.
}
if err = task.Kill(ctx, unix.SIGKILL, containerd.WithKillAll); err != nil && !errdefs.IsNotFound(err) {
return errors.Wrapf(err, "failed to kill container %q", id)
}
// Wait for a fixed timeout until container stop is observed by event monitor.
if err := c.waitContainerStop(ctx, container, killContainerTimeout); err != nil {
return errors.Wrapf(err, "an error occurs during waiting for container %q to stop", id)
if err = c.waitContainerStop(ctx, container, killContainerTimeout); err == nil {
return nil
}
return nil
logrus.WithError(err).Errorf("An error occurs during waiting for container %q to be killed", id)
// This is a fix for `runc`, and should not break other runtimes. With
// containerd.WithKillAll, `runc` will get all processes from the container
// cgroups, and kill them. However, sometimes the processes may be moved
// out from the container cgroup, e.g. users manually move them by mistake,
// or systemd.Delegate=true is not set.
// In these cases, we should try our best to do cleanup, kill the container
// without containerd.WithKillAll, so that runc can kill the container init
// process directly.
// NOTE(random-liu): If pid namespace is shared inside the pod, non-init processes
// of this container will be left running until the pause container is stopped.
logrus.Infof("Kill container %q init process", id)
if err = task.Kill(ctx, unix.SIGKILL); err != nil && !errdefs.IsNotFound(err) {
return errors.Wrapf(err, "failed to kill container %q init process", id)
}
// Wait for a fixed timeout until container stop is observed by event monitor.
if err = c.waitContainerStop(ctx, container, killContainerTimeout); err == nil {
return nil
}
return errors.Wrapf(err, "an error occurs during waiting for container %q init process to be killed", id)
}
// waitContainerStop waits for container to be stopped until timeout exceeds or context is cancelled.

View File

@@ -34,6 +34,7 @@ import (
ctrdutil "github.com/containerd/cri/pkg/containerd/util"
"github.com/containerd/cri/pkg/store"
containerstore "github.com/containerd/cri/pkg/store/container"
imagestore "github.com/containerd/cri/pkg/store/image"
sandboxstore "github.com/containerd/cri/pkg/store/sandbox"
)
@@ -49,6 +50,7 @@ const (
type eventMonitor struct {
containerStore *containerstore.Store
sandboxStore *sandboxstore.Store
imageStore *imagestore.Store
ch <-chan *events.Envelope
errCh <-chan error
ctx context.Context
@@ -76,12 +78,13 @@ type backOffQueue struct {
// Create new event monitor. The new event monitor will start subscribing to containerd events.
// All events that happen after it is created should be monitored.
func newEventMonitor(c *containerstore.Store, s *sandboxstore.Store) *eventMonitor {
func newEventMonitor(c *containerstore.Store, s *sandboxstore.Store, i *imagestore.Store) *eventMonitor {
// event subscribe doesn't need namespace.
ctx, cancel := context.WithCancel(context.Background())
return &eventMonitor{
containerStore: c,
sandboxStore: s,
imageStore: i,
ctx: ctx,
cancel: cancel,
backOff: newBackOff(),
@@ -93,12 +96,13 @@ func (em *eventMonitor) subscribe(subscriber events.Subscriber) {
filters := []string{
`topic=="/tasks/exit"`,
`topic=="/tasks/oom"`,
`topic~="/images/"`,
}
em.ch, em.errCh = subscriber.Subscribe(em.ctx, filters...)
}
func convertEvent(e *gogotypes.Any) (string, interface{}, error) {
containerID := ""
id := ""
evt, err := typeurl.UnmarshalAny(e)
if err != nil {
return "", nil, errors.Wrap(err, "failed to unmarshalany")
@@ -106,16 +110,22 @@ func convertEvent(e *gogotypes.Any) (string, interface{}, error) {
switch evt.(type) {
case *eventtypes.TaskExit:
containerID = evt.(*eventtypes.TaskExit).ContainerID
id = evt.(*eventtypes.TaskExit).ContainerID
case *eventtypes.TaskOOM:
containerID = evt.(*eventtypes.TaskOOM).ContainerID
id = evt.(*eventtypes.TaskOOM).ContainerID
case *eventtypes.ImageCreate:
id = evt.(*eventtypes.ImageCreate).Name
case *eventtypes.ImageUpdate:
id = evt.(*eventtypes.ImageUpdate).Name
case *eventtypes.ImageDelete:
id = evt.(*eventtypes.ImageDelete).Name
default:
return "", nil, errors.New("unsupported event")
}
return containerID, evt, nil
return id, evt, nil
}
// start starts the event monitor which monitors and handles all container events. It returns
// start starts the event monitor which monitors and handles all subscribed events. It returns
// an error channel for the caller to wait for stop errors from the event monitor.
// start must be called after subscribe.
func (em *eventMonitor) start() <-chan error {
@@ -130,19 +140,19 @@ func (em *eventMonitor) start() <-chan error {
select {
case e := <-em.ch:
logrus.Debugf("Received containerd event timestamp - %v, namespace - %q, topic - %q", e.Timestamp, e.Namespace, e.Topic)
cID, evt, err := convertEvent(e.Event)
id, evt, err := convertEvent(e.Event)
if err != nil {
logrus.WithError(err).Errorf("Failed to convert event %+v", e)
break
}
if em.backOff.isInBackOff(cID) {
logrus.Infof("Events for container %q is in backoff, enqueue event %+v", cID, evt)
em.backOff.enBackOff(cID, evt)
if em.backOff.isInBackOff(id) {
logrus.Infof("Events for %q is in backoff, enqueue event %+v", id, evt)
em.backOff.enBackOff(id, evt)
break
}
if err := em.handleEvent(evt); err != nil {
logrus.WithError(err).Errorf("Failed to handle event %+v for container %s", evt, cID)
em.backOff.enBackOff(cID, evt)
logrus.WithError(err).Errorf("Failed to handle event %+v for %s", evt, id)
em.backOff.enBackOff(id, evt)
}
case err := <-em.errCh:
// Close errCh in defer directly if there is no error.
@@ -152,13 +162,13 @@ func (em *eventMonitor) start() <-chan error {
}
return
case <-backOffCheckCh:
cIDs := em.backOff.getExpiredContainers()
for _, cID := range cIDs {
queue := em.backOff.deBackOff(cID)
ids := em.backOff.getExpiredIDs()
for _, id := range ids {
queue := em.backOff.deBackOff(id)
for i, any := range queue.events {
if err := em.handleEvent(any); err != nil {
logrus.WithError(err).Errorf("Failed to handle backOff event %+v for container %s", any, cID)
em.backOff.reBackOff(cID, queue.events[i:], queue.duration)
logrus.WithError(err).Errorf("Failed to handle backOff event %+v for %s", any, id)
em.backOff.reBackOff(id, queue.events[i:], queue.duration)
break
}
}
@@ -230,6 +240,18 @@ func (em *eventMonitor) handleEvent(any interface{}) error {
if err != nil {
return errors.Wrap(err, "failed to update container status for TaskOOM event")
}
case *eventtypes.ImageCreate:
e := any.(*eventtypes.ImageCreate)
logrus.Infof("ImageCreate event %+v", e)
return em.imageStore.Update(ctx, e.Name)
case *eventtypes.ImageUpdate:
e := any.(*eventtypes.ImageUpdate)
logrus.Infof("ImageUpdate event %+v", e)
return em.imageStore.Update(ctx, e.Name)
case *eventtypes.ImageDelete:
e := any.(*eventtypes.ImageDelete)
logrus.Infof("ImageDelete event %+v", e)
return em.imageStore.Update(ctx, e.Name)
}
return nil
@@ -331,14 +353,14 @@ func newBackOff() *backOff {
}
}
func (b *backOff) getExpiredContainers() []string {
var containers []string
for c, q := range b.queuePool {
func (b *backOff) getExpiredIDs() []string {
var ids []string
for id, q := range b.queuePool {
if q.isExpire() {
containers = append(containers, c)
ids = append(ids, id)
}
}
return containers
return ids
}
func (b *backOff) isInBackOff(key string) bool {

View File

@@ -17,22 +17,19 @@ limitations under the License.
package server
import (
"encoding/json"
"fmt"
"os"
"path"
"path/filepath"
"regexp"
"strconv"
"strings"
"github.com/containerd/containerd"
"github.com/containerd/containerd/containers"
"github.com/containerd/containerd/content"
"github.com/containerd/containerd/runtime/linux/runctypes"
"github.com/containerd/typeurl"
"github.com/docker/distribution/reference"
imagedigest "github.com/opencontainers/go-digest"
"github.com/opencontainers/image-spec/identity"
imagespec "github.com/opencontainers/image-spec/specs-go/v1"
runtimespec "github.com/opencontainers/runtime-spec/specs-go"
"github.com/opencontainers/runtime-tools/generate"
"github.com/opencontainers/selinux/go-selinux"
@@ -95,6 +92,8 @@ const (
etcHosts = "/etc/hosts"
// resolvConfPath is the abs path of resolv.conf on host or container.
resolvConfPath = "/etc/resolv.conf"
// hostnameEnv is the key for HOSTNAME env.
hostnameEnv = "HOSTNAME"
)
const (
@@ -234,28 +233,25 @@ func getRepoDigestAndTag(namedRef reference.Named, digest imagedigest.Digest, sc
return repoDigest, repoTag
}
// localResolve resolves image reference locally and returns corresponding image metadata. It returns
// nil without error if the reference doesn't exist.
func (c *criService) localResolve(ctx context.Context, refOrID string) (*imagestore.Image, error) {
// localResolve resolves image reference locally and returns corresponding image metadata. It
// returns store.ErrNotExist if the reference doesn't exist.
func (c *criService) localResolve(refOrID string) (imagestore.Image, error) {
getImageID := func(refOrId string) string {
if _, err := imagedigest.Parse(refOrID); err == nil {
return refOrID
}
return func(ref string) string {
// ref is not image id, try to resolve it locally.
// TODO(random-liu): Handle this error better for debugging.
normalized, err := util.NormalizeImageRef(ref)
if err != nil {
return ""
}
image, err := c.client.GetImage(ctx, normalized.String())
id, err := c.imageStore.Resolve(normalized.String())
if err != nil {
return ""
}
desc, err := image.Config(ctx)
if err != nil {
return ""
}
return desc.Digest.String()
return id
}(refOrID)
}
@@ -264,14 +260,7 @@ func (c *criService) localResolve(ctx context.Context, refOrID string) (*imagest
// Try to treat ref as imageID
imageID = refOrID
}
image, err := c.imageStore.Get(imageID)
if err != nil {
if err == store.ErrNotExist {
return nil, nil
}
return nil, errors.Wrapf(err, "failed to get image %q", imageID)
}
return &image, nil
return c.imageStore.Get(imageID)
}
// getUserFromImage gets uid or user name of the image user.
@@ -296,12 +285,12 @@ func getUserFromImage(user string) (*int64, string) {
// ensureImageExists returns corresponding metadata of the image reference, if image is not
// pulled yet, the function will pull the image.
func (c *criService) ensureImageExists(ctx context.Context, ref string) (*imagestore.Image, error) {
image, err := c.localResolve(ctx, ref)
if err != nil {
return nil, errors.Wrapf(err, "failed to resolve image %q", ref)
image, err := c.localResolve(ref)
if err != nil && err != store.ErrNotExist {
return nil, errors.Wrapf(err, "failed to get image %q", ref)
}
if image != nil {
return image, nil
if err == nil {
return &image, nil
}
// Pull image to ensure the image exists
resp, err := c.PullImage(ctx, &runtime.PullImageRequest{Image: &runtime.ImageSpec{Image: ref}})
@@ -312,56 +301,11 @@ func (c *criService) ensureImageExists(ctx context.Context, ref string) (*images
newImage, err := c.imageStore.Get(imageID)
if err != nil {
// It's still possible that someone removed the image right after it is pulled.
return nil, errors.Wrapf(err, "failed to get image %q metadata after pulling", imageID)
return nil, errors.Wrapf(err, "failed to get image %q after pulling", imageID)
}
return &newImage, nil
}
// imageInfo is the information about the image got from containerd.
type imageInfo struct {
id string
chainID imagedigest.Digest
size int64
imagespec imagespec.Image
}
// getImageInfo gets image info from containerd.
func getImageInfo(ctx context.Context, image containerd.Image) (*imageInfo, error) {
// Get image information.
diffIDs, err := image.RootFS(ctx)
if err != nil {
return nil, errors.Wrap(err, "failed to get image diffIDs")
}
chainID := identity.ChainID(diffIDs)
size, err := image.Size(ctx)
if err != nil {
return nil, errors.Wrap(err, "failed to get image compressed resource size")
}
desc, err := image.Config(ctx)
if err != nil {
return nil, errors.Wrap(err, "failed to get image config descriptor")
}
id := desc.Digest.String()
rb, err := content.ReadBlob(ctx, image.ContentStore(), desc)
if err != nil {
return nil, errors.Wrap(err, "failed to read image config from content store")
}
var ociimage imagespec.Image
if err := json.Unmarshal(rb, &ociimage); err != nil {
return nil, errors.Wrapf(err, "failed to unmarshal image config %s", rb)
}
return &imageInfo{
id: id,
chainID: chainID,
size: size,
imagespec: ociimage,
}, nil
}
func initSelinuxOpts(selinuxOpt *runtime.SELinuxOption) (string, string, error) {
if selinuxOpt == nil {
return "", "", nil
@@ -370,11 +314,16 @@ func initSelinuxOpts(selinuxOpt *runtime.SELinuxOption) (string, string, error)
// Ignore selinuxOpts if they are incomplete.
if selinuxOpt.GetUser() == "" ||
selinuxOpt.GetRole() == "" ||
selinuxOpt.GetType() == "" ||
selinuxOpt.GetLevel() == "" {
selinuxOpt.GetType() == "" {
return "", "", nil
}
// make sure the format of "level" is correct.
ok, err := checkSelinuxLevel(selinuxOpt.GetLevel())
if err != nil || !ok {
return "", "", err
}
labelOpts := fmt.Sprintf("%s:%s:%s:%s",
selinuxOpt.GetUser(),
selinuxOpt.GetRole(),
@@ -383,6 +332,18 @@ func initSelinuxOpts(selinuxOpt *runtime.SELinuxOption) (string, string, error)
return label.InitLabels(selinux.DupSecOpt(labelOpts))
}
func checkSelinuxLevel(level string) (bool, error) {
if len(level) == 0 {
return true, nil
}
matched, err := regexp.MatchString(`^s\d(-s\d)??(:c\d{1,4}((.c\d{1,4})?,c\d{1,4})*(.c\d{1,4})?(,c\d{1,4}(.c\d{1,4})?)*)?$`, level)
if err != nil || !matched {
return false, fmt.Errorf("the format of 'level' %q is not correct: %v", level, err)
}
return true, nil
}
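Illustrative inputs for the level check (not taken from the commit):

```go
ok, err := checkSelinuxLevel("s0:c122,c124")
fmt.Println(ok, err) // true <nil>
ok, err = checkSelinuxLevel("not-a-level")
fmt.Println(ok, err) // false, plus an error describing the bad 'level' format
```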
// isInCRIMounts checks whether a destination is in CRI mount list.
func isInCRIMounts(dst string, mounts []*runtime.Mount) bool {
for _, m := range mounts {
@@ -454,3 +415,48 @@ func toRuntimeAuthConfig(a criconfig.AuthConfig) *runtime.AuthConfig {
IdentityToken: a.IdentityToken,
}
}
// mounts defines how to sort runtime.Mount.
// This is the same with the Docker implementation:
// https://github.com/moby/moby/blob/17.05.x/daemon/volumes.go#L26
type orderedMounts []*runtime.Mount
// Len returns the number of mounts. Used in sorting.
func (m orderedMounts) Len() int {
return len(m)
}
// Less returns true if the number of parts (a/b/c would be 3 parts) in the
// mount indexed by parameter 1 is less than that of the mount indexed by
// parameter 2. Used in sorting.
func (m orderedMounts) Less(i, j int) bool {
return m.parts(i) < m.parts(j)
}
// Swap swaps two items in an array of mounts. Used in sorting
func (m orderedMounts) Swap(i, j int) {
m[i], m[j] = m[j], m[i]
}
// parts returns the number of parts in the destination of a mount. Used in sorting.
func (m orderedMounts) parts(i int) int {
return strings.Count(filepath.Clean(m[i].ContainerPath), string(os.PathSeparator))
}
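A small example of why the sort matters (hypothetical mounts):

```go
mounts := []*runtime.Mount{
	{ContainerPath: "/etc/resolv.conf"},
	{ContainerPath: "/etc"},
}
sort.Sort(orderedMounts(mounts))
// mounts[0].ContainerPath is now "/etc": the shallower mount is added first,
// so the more specific /etc/resolv.conf mount is not shadowed by it.
```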
// parseImageReferences parses a list of arbitrary image references and returns
// the repotags and repodigests
func parseImageReferences(refs []string) ([]string, []string) {
var tags, digests []string
for _, ref := range refs {
parsed, err := reference.ParseAnyReference(ref)
if err != nil {
continue
}
if _, ok := parsed.(reference.Canonical); ok {
digests = append(digests, parsed.String())
} else if _, ok := parsed.(reference.Tagged); ok {
tags = append(tags, parsed.String())
}
}
return tags, digests
}
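A sketch with placeholder references (the digest below is an all-zero placeholder, not a real image digest):

```go
refs := []string{
	"docker.io/library/busybox:1.28",
	"docker.io/library/busybox@sha256:" + strings.Repeat("0", 64), // placeholder digest
}
tags, digests := parseImageReferences(refs)
// tags holds the :1.28 reference, digests holds the @sha256:... reference;
// anything that fails to parse is silently skipped.
```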

View File

@@ -19,8 +19,6 @@ package server
import (
"golang.org/x/net/context"
runtime "k8s.io/kubernetes/pkg/kubelet/apis/cri/runtime/v1alpha2"
imagestore "github.com/containerd/cri/pkg/store/image"
)
// ListImages lists existing images.
@@ -38,19 +36,3 @@ func (c *criService) ListImages(ctx context.Context, r *runtime.ListImagesReques
return &runtime.ListImagesResponse{Images: images}, nil
}
// toCRIImage converts image to CRI image type.
func toCRIImage(image imagestore.Image) *runtime.Image {
runtimeImage := &runtime.Image{
Id: image.ID,
RepoTags: image.RepoTags,
RepoDigests: image.RepoDigests,
Size_: uint64(image.Size),
}
uid, username := getUserFromImage(image.ImageSpec.Config.User)
if uid != nil {
runtimeImage.Uid = &runtime.Int64Value{Value: *uid}
}
runtimeImage.Username = username
return runtimeImage
}

View File

@@ -26,7 +26,6 @@ import (
api "github.com/containerd/cri/pkg/api/v1"
"github.com/containerd/cri/pkg/containerd/importer"
imagestore "github.com/containerd/cri/pkg/store/image"
)
// LoadImage loads a image into containerd.
@@ -39,42 +38,16 @@ func (c *criService) LoadImage(ctx context.Context, r *api.LoadImageRequest) (*a
if err != nil {
return nil, errors.Wrap(err, "failed to open file")
}
repoTags, err := importer.Import(ctx, c.client, f)
repoTags, err := importer.Import(ctx, c.client, f, importer.WithUnpack(c.config.ContainerdConfig.Snapshotter))
if err != nil {
return nil, errors.Wrap(err, "failed to import image")
}
for _, repoTag := range repoTags {
image, err := c.client.GetImage(ctx, repoTag)
if err != nil {
return nil, errors.Wrapf(err, "failed to get image %q", repoTag)
// Update image store to reflect the newest state in containerd.
if err := c.imageStore.Update(ctx, repoTag); err != nil {
return nil, errors.Wrapf(err, "failed to update image store %q", repoTag)
}
if err := image.Unpack(ctx, c.config.ContainerdConfig.Snapshotter); err != nil {
logrus.WithError(err).Warnf("Failed to unpack image %q", repoTag)
// Do not fail image importing. Unpack will be retried when container creation.
}
info, err := getImageInfo(ctx, image)
if err != nil {
return nil, errors.Wrapf(err, "failed to get image %q info", repoTag)
}
id := info.id
if err := c.createImageReference(ctx, id, image.Target()); err != nil {
return nil, errors.Wrapf(err, "failed to create image reference %q", id)
}
img := imagestore.Image{
ID: id,
RepoTags: []string{repoTag},
ChainID: info.chainID.String(),
Size: info.size,
ImageSpec: info.imagespec,
Image: image,
}
if err := c.imageStore.Add(img); err != nil {
return nil, errors.Wrapf(err, "failed to add image %q into store", id)
}
logrus.Debugf("Imported image with id %q, repo tag %q", id, repoTag)
logrus.Debugf("Imported image %q", repoTag)
}
return &api.LoadImageResponse{Images: repoTags}, nil
}

View File

@@ -34,7 +34,6 @@ import (
"golang.org/x/net/context"
runtime "k8s.io/kubernetes/pkg/kubelet/apis/cri/runtime/v1alpha2"
imagestore "github.com/containerd/cri/pkg/store/image"
"github.com/containerd/cri/pkg/util"
)
@@ -108,49 +107,34 @@ func (c *criService) PullImage(ctx context.Context, r *runtime.PullImageRequest)
return nil, errors.Wrapf(err, "failed to pull and unpack image %q", ref)
}
// Get image information.
info, err := getImageInfo(ctx, image)
configDesc, err := image.Config(ctx)
if err != nil {
return nil, errors.Wrap(err, "failed to get image information")
return nil, errors.Wrap(err, "get image config descriptor")
}
imageID := info.id
imageID := configDesc.Digest.String()
repoDigest, repoTag := getRepoDigestAndTag(namedRef, image.Target().Digest, isSchema1)
for _, r := range []string{repoTag, repoDigest, imageID} {
for _, r := range []string{repoTag, repoDigest} {
if r == "" {
continue
}
if err := c.createImageReference(ctx, r, image.Target()); err != nil {
return nil, errors.Wrapf(err, "failed to update image reference %q", r)
}
// Update image store to reflect the newest state in containerd.
if err := c.imageStore.Update(ctx, r); err != nil {
return nil, errors.Wrapf(err, "failed to update image store %q", r)
}
}
logrus.Debugf("Pulled image %q with image id %q, repo tag %q, repo digest %q", imageRef, imageID,
repoTag, repoDigest)
img := imagestore.Image{
ID: imageID,
ChainID: info.chainID.String(),
Size: info.size,
ImageSpec: info.imagespec,
Image: image,
}
if repoDigest != "" {
img.RepoDigests = []string{repoDigest}
}
if repoTag != "" {
img.RepoTags = []string{repoTag}
}
if err := c.imageStore.Add(img); err != nil {
return nil, errors.Wrapf(err, "failed to add image %q into store", img.ID)
}
// NOTE(random-liu): the actual state in containerd is the source of truth; even though we maintain
// an in-memory image store, it's only for in-memory indexing. The image could be removed
// by someone else anytime, before/during/after we create the metadata. We should always
// check the actual state in containerd before using the image or returning status of the
// image.
return &runtime.PullImageResponse{ImageRef: img.ID}, nil
return &runtime.PullImageResponse{ImageRef: imageID}, nil
}
// ParseAuth parses AuthConfig and returns username and password/secret required by containerd.

View File

@@ -20,9 +20,10 @@ import (
"github.com/containerd/containerd/errdefs"
"github.com/containerd/containerd/images"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
"golang.org/x/net/context"
runtime "k8s.io/kubernetes/pkg/kubelet/apis/cri/runtime/v1alpha2"
"github.com/containerd/cri/pkg/store"
)
// RemoveImage removes the image.
@@ -32,62 +33,33 @@ import (
// Remove the whole image no matter whether it's an image id or a reference. This is the
// semantic defined in CRI now.
func (c *criService) RemoveImage(ctx context.Context, r *runtime.RemoveImageRequest) (*runtime.RemoveImageResponse, error) {
image, err := c.localResolve(ctx, r.GetImage().GetImage())
image, err := c.localResolve(r.GetImage().GetImage())
if err != nil {
if err == store.ErrNotExist {
// return empty without error when image not found.
return &runtime.RemoveImageResponse{}, nil
}
return nil, errors.Wrapf(err, "can not resolve %q locally", r.GetImage().GetImage())
}
if image == nil {
// return empty without error when image not found.
return &runtime.RemoveImageResponse{}, nil
}
// Exclude outdated image tag.
for i, tag := range image.RepoTags {
cImage, err := c.client.GetImage(ctx, tag)
if err != nil {
if errdefs.IsNotFound(err) {
continue
}
return nil, errors.Wrapf(err, "failed to get image %q", tag)
// Remove all image references.
for i, ref := range image.References {
var opts []images.DeleteOpt
if i == len(image.References)-1 {
// Delete the last image reference synchronously to trigger garbage collection.
// This is best effort. It is possible that the image reference is deleted by
// someone else before this point.
opts = []images.DeleteOpt{images.SynchronousDelete()}
}
desc, err := cImage.Config(ctx)
if err != nil {
// We can only get image id by reading Config from content.
// If the config is missing, we will fail to get image id,
// So we won't be able to remove the image forever,
// and the cri plugin always reports the image is ok.
// But we also don't check it by manifest,
// It's possible that two manifest digests have the same image ID in theory.
// In theory it's possible that an image is compressed with different algorithms,
// then they'll have the same uncompressed id - image id,
// but different ids generated from compressed contents - manifest digest.
// So we decide to leave it.
// After all, the user can override the repoTag by pulling image again.
logrus.WithError(err).Errorf("Can't remove image,failed to get config for Image tag %q,id %q", tag, image.ID)
image.RepoTags = append(image.RepoTags[:i], image.RepoTags[i+1:]...)
continue
}
cID := desc.Digest.String()
if cID != image.ID {
logrus.Debugf("Image tag %q for %q is outdated, it's currently used by %q", tag, image.ID, cID)
image.RepoTags = append(image.RepoTags[:i], image.RepoTags[i+1:]...)
continue
}
}
// Include all image references, including RepoTag, RepoDigest and id.
for _, ref := range append(image.RepoTags, image.RepoDigests...) {
err = c.client.ImageService().Delete(ctx, ref)
err = c.client.ImageService().Delete(ctx, ref, opts...)
if err == nil || errdefs.IsNotFound(err) {
// Update image store to reflect the newest state in containerd.
if err := c.imageStore.Update(ctx, ref); err != nil {
return nil, errors.Wrapf(err, "failed to update image reference %q for %q", ref, image.ID)
}
continue
}
return nil, errors.Wrapf(err, "failed to delete image reference %q for image %q", ref, image.ID)
return nil, errors.Wrapf(err, "failed to delete image reference %q for %q", ref, image.ID)
}
// Delete image id synchronously to trigger garbage collection.
err = c.client.ImageService().Delete(ctx, image.ID, images.SynchronousDelete())
if err != nil && !errdefs.IsNotFound(err) {
return nil, errors.Wrapf(err, "failed to delete image id %q", image.ID)
}
c.imageStore.Delete(image.ID)
return &runtime.RemoveImageResponse{}, nil
}

View File

@@ -24,6 +24,7 @@ import (
"golang.org/x/net/context"
runtime "k8s.io/kubernetes/pkg/kubelet/apis/cri/runtime/v1alpha2"
"github.com/containerd/cri/pkg/store"
imagestore "github.com/containerd/cri/pkg/store/image"
imagespec "github.com/opencontainers/image-spec/specs-go/v1"
)
@@ -32,19 +33,19 @@ import (
// TODO(random-liu): We should change CRI to distinguish image id and image spec. (See
// kubernetes/kubernetes#46255)
func (c *criService) ImageStatus(ctx context.Context, r *runtime.ImageStatusRequest) (*runtime.ImageStatusResponse, error) {
image, err := c.localResolve(ctx, r.GetImage().GetImage())
image, err := c.localResolve(r.GetImage().GetImage())
if err != nil {
if err == store.ErrNotExist {
// return empty without error when image not found.
return &runtime.ImageStatusResponse{}, nil
}
return nil, errors.Wrapf(err, "can not resolve %q locally", r.GetImage().GetImage())
}
if image == nil {
// return empty without error when image not found.
return &runtime.ImageStatusResponse{}, nil
}
// TODO(random-liu): [P0] Make sure corresponding snapshot exists. What if snapshot
// doesn't exist?
runtimeImage := toCRIRuntimeImage(image)
info, err := c.toCRIImageInfo(ctx, image, r.GetVerbose())
runtimeImage := toCRIImage(image)
info, err := c.toCRIImageInfo(ctx, &image, r.GetVerbose())
if err != nil {
return nil, errors.Wrap(err, "failed to generate image info")
}
@@ -55,12 +56,13 @@ func (c *criService) ImageStatus(ctx context.Context, r *runtime.ImageStatusRequ
}, nil
}
// toCRIRuntimeImage converts internal image object to CRI runtime.Image.
func toCRIRuntimeImage(image *imagestore.Image) *runtime.Image {
// toCRIImage converts internal image object to CRI runtime.Image.
func toCRIImage(image imagestore.Image) *runtime.Image {
repoTags, repoDigests := parseImageReferences(image.References)
runtimeImage := &runtime.Image{
Id: image.ID,
RepoTags: image.RepoTags,
RepoDigests: image.RepoDigests,
RepoTags: repoTags,
RepoDigests: repoDigests,
Size_: uint64(image.Size),
}
uid, username := getUserFromImage(image.ImageSpec.Config.User)

View File

@@ -28,7 +28,6 @@ import (
containerdimages "github.com/containerd/containerd/images"
"github.com/containerd/containerd/platforms"
"github.com/containerd/typeurl"
"github.com/docker/distribution/reference"
"github.com/docker/docker/pkg/system"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
@@ -97,16 +96,7 @@ func (c *criService) recover(ctx context.Context) error {
if err != nil {
return errors.Wrap(err, "failed to list images")
}
images, err := loadImages(ctx, cImages, c.config.ContainerdConfig.Snapshotter)
if err != nil {
return errors.Wrap(err, "failed to load images")
}
for _, image := range images {
logrus.Debugf("Loaded image %+v", image)
if err := c.imageStore.Add(image); err != nil {
return errors.Wrapf(err, "failed to add image %q to store", image.ID)
}
}
loadImages(ctx, c.imageStore, cImages, c.config.ContainerdConfig.Snapshotter)
// It's possible that containerd containers are deleted unexpectedly. In that case,
// we can't even get metadata, we should cleanup orphaned sandbox/container directories
@@ -404,26 +394,9 @@ func loadSandbox(ctx context.Context, cntr containerd.Container) (sandboxstore.S
}
// loadImages loads images from containerd.
// TODO(random-liu): Check whether image is unpacked, because containerd put image reference
// into store before image is unpacked.
func loadImages(ctx context.Context, cImages []containerd.Image,
snapshotter string) ([]imagestore.Image, error) {
// Group images by image id.
imageMap := make(map[string][]containerd.Image)
func loadImages(ctx context.Context, store *imagestore.Store, cImages []containerd.Image,
snapshotter string) {
for _, i := range cImages {
desc, err := i.Config(ctx)
if err != nil {
logrus.WithError(err).Warnf("Failed to get image config for %q", i.Name())
continue
}
id := desc.Digest.String()
imageMap[id] = append(imageMap[id], i)
}
var images []imagestore.Image
for id, imgs := range imageMap {
// imgs len must be > 0, or else the entry will not be created in
// previous loop.
i := imgs[0]
ok, _, _, _, err := containerdimages.Check(ctx, i.ContentStore(), i.Target(), platforms.Default())
if err != nil {
logrus.WithError(err).Errorf("Failed to check image content readiness for %q", i.Name())
@@ -436,48 +409,19 @@ func loadImages(ctx context.Context, cImages []containerd.Image,
// Checking existence of top-level snapshot for each image being recovered.
unpacked, err := i.IsUnpacked(ctx, snapshotter)
if err != nil {
logrus.WithError(err).Warnf("Failed to Check whether image is unpacked for image %s", i.Name())
logrus.WithError(err).Warnf("Failed to check whether image is unpacked for image %s", i.Name())
continue
}
if !unpacked {
logrus.Warnf("The image %s is not unpacked.", i.Name())
// TODO(random-liu): Consider whether we should try unpack here.
}
info, err := getImageInfo(ctx, i)
if err != nil {
logrus.WithError(err).Warnf("Failed to get image info for %q", i.Name())
if err := store.Update(ctx, i.Name()); err != nil {
logrus.WithError(err).Warnf("Failed to update reference for image %q", i.Name())
continue
}
image := imagestore.Image{
ID: id,
ChainID: info.chainID.String(),
Size: info.size,
ImageSpec: info.imagespec,
Image: i,
}
// Recover repo digests and repo tags.
for _, i := range imgs {
name := i.Name()
r, err := reference.ParseAnyReference(name)
if err != nil {
logrus.WithError(err).Warnf("Failed to parse image reference %q", name)
continue
}
if _, ok := r.(reference.Canonical); ok {
image.RepoDigests = append(image.RepoDigests, name)
} else if _, ok := r.(reference.Tagged); ok {
image.RepoTags = append(image.RepoTags, name)
} else if _, ok := r.(reference.Digested); ok {
// This is an image id.
continue
} else {
logrus.Warnf("Invalid image reference %q", name)
}
}
images = append(images, image)
logrus.Debugf("Loaded image %q", i.Name())
}
return images, nil
}
func cleanupOrphanedIDDirs(cntrs []containerd.Container, base string) error {

View File

@@ -74,9 +74,10 @@ func (c *criService) RunPodSandbox(ctx context.Context, r *runtime.RunPodSandbox
// Create initial internal sandbox object.
sandbox := sandboxstore.NewSandbox(
sandboxstore.Metadata{
ID: id,
Name: name,
Config: config,
ID: id,
Name: name,
Config: config,
RuntimeHandler: r.GetRuntimeHandler(),
},
sandboxstore.Status{
State: sandboxstore.StateUnknown,
@@ -131,7 +132,7 @@ func (c *criService) RunPodSandbox(ctx context.Context, r *runtime.RunPodSandbox
}()
}
ociRuntime, err := c.getSandboxRuntime(config)
ociRuntime, err := c.getSandboxRuntime(config, r.GetRuntimeHandler())
if err != nil {
return nil, errors.Wrap(err, "failed to get sandbox runtime")
}
@@ -558,6 +559,9 @@ func toCNIPortMappings(criPortMappings []*runtime.PortMapping) []cni.PortMapping
if mapping.HostPort <= 0 {
continue
}
if mapping.Protocol != runtime.Protocol_TCP && mapping.Protocol != runtime.Protocol_UDP {
continue
}
portMappings = append(portMappings, cni.PortMapping{
HostPort: mapping.HostPort,
ContainerPort: mapping.ContainerPort,
@@ -601,9 +605,13 @@ func hostAccessingSandbox(config *runtime.PodSandboxConfig) bool {
// getSandboxRuntime returns the runtime configuration for sandbox.
// If the sandbox contains untrusted workload, runtime for untrusted workload will be returned,
// or else default runtime will be returned.
func (c *criService) getSandboxRuntime(config *runtime.PodSandboxConfig) (criconfig.Runtime, error) {
untrusted := false
func (c *criService) getSandboxRuntime(config *runtime.PodSandboxConfig, runtimeHandler string) (criconfig.Runtime, error) {
if untrustedWorkload(config) {
// If the untrusted annotation is provided, runtimeHandler MUST be empty.
if runtimeHandler != "" && runtimeHandler != criconfig.RuntimeUntrusted {
return criconfig.Runtime{}, errors.New("untrusted workload with explicit runtime handler is not allowed")
}
// If the untrusted workload is requesting access to the host/node, this request will fail.
//
// Note: If the workload is marked untrusted but requests privileged, this can be granted, as the
@@ -612,14 +620,22 @@ func (c *criService) getSandboxRuntime(config *runtime.PodSandboxConfig) (cricon
if hostAccessingSandbox(config) {
return criconfig.Runtime{}, errors.New("untrusted workload with host access is not allowed")
}
untrusted = true
// Handle the deprecated UntrustedWorkloadRuntime.
if c.config.ContainerdConfig.UntrustedWorkloadRuntime.Type != "" {
return c.config.ContainerdConfig.UntrustedWorkloadRuntime, nil
}
runtimeHandler = criconfig.RuntimeUntrusted
}
if untrusted {
if c.config.ContainerdConfig.UntrustedWorkloadRuntime.Type == "" {
return criconfig.Runtime{}, errors.New("no runtime for untrusted workload is configured")
}
return c.config.ContainerdConfig.UntrustedWorkloadRuntime, nil
if runtimeHandler == "" {
return c.config.ContainerdConfig.DefaultRuntime, nil
}
return c.config.ContainerdConfig.DefaultRuntime, nil
handler, ok := c.config.ContainerdConfig.Runtimes[runtimeHandler]
if !ok {
return criconfig.Runtime{}, errors.Errorf("no runtime for %q is configured", runtimeHandler)
}
return handler, nil
}
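A sketch of how the handler resolution behaves (untrustedPodConfig and plainPodConfig are hypothetical PodSandboxConfig values, and the runtime type string is an assumed example):

```go
c.config.ContainerdConfig.Runtimes = map[string]criconfig.Runtime{
	criconfig.RuntimeUntrusted: {Type: "io.containerd.runtime.v1.linux"},
	"gvisor":                   {Type: "io.containerd.runtime.v1.linux"},
}
r, err := c.getSandboxRuntime(untrustedPodConfig, "")  // resolves to Runtimes["untrusted"]
r, err = c.getSandboxRuntime(plainPodConfig, "gvisor") // resolves to Runtimes["gvisor"]
r, err = c.getSandboxRuntime(plainPodConfig, "")       // falls back to DefaultRuntime
_, _ = r, err
```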

View File

@@ -102,15 +102,16 @@ func toCRISandboxStatus(meta sandboxstore.Metadata, status sandboxstore.Status,
// TODO (mikebrow): discuss predefining constants structures for some or all of these field names in CRI
type sandboxInfo struct {
Pid uint32 `json:"pid"`
Status string `json:"processStatus"`
NetNSClosed bool `json:"netNamespaceClosed"`
Image string `json:"image"`
SnapshotKey string `json:"snapshotKey"`
Snapshotter string `json:"snapshotter"`
Runtime *criconfig.Runtime `json:"runtime"`
Config *runtime.PodSandboxConfig `json:"config"`
RuntimeSpec *runtimespec.Spec `json:"runtimeSpec"`
Pid uint32 `json:"pid"`
Status string `json:"processStatus"`
NetNSClosed bool `json:"netNamespaceClosed"`
Image string `json:"image"`
SnapshotKey string `json:"snapshotKey"`
Snapshotter string `json:"snapshotter"`
RuntimeHandler string `json:"runtimeHandler"`
Runtime *criconfig.Runtime `json:"runtime"`
Config *runtime.PodSandboxConfig `json:"config"`
RuntimeSpec *runtimespec.Spec `json:"runtimeSpec"`
}
// toCRISandboxInfo converts internal container object information to CRI sandbox status response info map.
@@ -132,9 +133,10 @@ func toCRISandboxInfo(ctx context.Context, sandbox sandboxstore.Sandbox) (map[st
}
si := &sandboxInfo{
Pid: sandbox.Status.Get().Pid,
Status: string(processStatus),
Config: sandbox.Config,
Pid: sandbox.Status.Get().Pid,
RuntimeHandler: sandbox.RuntimeHandler,
Status: string(processStatus),
Config: sandbox.Config,
}
if si.Status == "" {

View File

@@ -109,11 +109,20 @@ func (c *criService) stopSandboxContainer(ctx context.Context, sandbox sandboxst
}
// Kill the sandbox container.
err = task.Kill(ctx, unix.SIGKILL, containerd.WithKillAll)
if err != nil && !errdefs.IsNotFound(err) {
if err = task.Kill(ctx, unix.SIGKILL, containerd.WithKillAll); err != nil && !errdefs.IsNotFound(err) {
return errors.Wrap(err, "failed to kill sandbox container")
}
if err = c.waitSandboxStop(ctx, sandbox, killContainerTimeout); err == nil {
return nil
}
logrus.WithError(err).Errorf("An error occurs during waiting for sandbox %q to be killed", sandbox.ID)
// Kill the sandbox container init process.
if err = task.Kill(ctx, unix.SIGKILL); err != nil && !errdefs.IsNotFound(err) {
return errors.Wrap(err, "failed to kill sandbox container init process")
}
return c.waitSandboxStop(ctx, sandbox, killContainerTimeout)
}

View File

@@ -113,7 +113,7 @@ func NewCRIService(config criconfig.Config, client *containerd.Client) (CRIServi
os: osinterface.RealOS{},
sandboxStore: sandboxstore.NewStore(),
containerStore: containerstore.NewStore(),
imageStore: imagestore.NewStore(),
imageStore: imagestore.NewStore(client),
snapshotStore: snapshotstore.NewStore(),
sandboxNameIndex: registrar.NewRegistrar(),
containerNameIndex: registrar.NewRegistrar(),
@@ -157,7 +157,7 @@ func NewCRIService(config criconfig.Config, client *containerd.Client) (CRIServi
return nil, errors.Wrap(err, "failed to create stream server")
}
c.eventMonitor = newEventMonitor(c.containerStore, c.sandboxStore)
c.eventMonitor = newEventMonitor(c.containerStore, c.sandboxStore, c.imageStore)
return c, nil
}

View File

@@ -34,6 +34,36 @@ import (
ctrdutil "github.com/containerd/cri/pkg/containerd/util"
)
type streamListenerMode int
const (
x509KeyPairTLS streamListenerMode = iota
selfSignTLS
withoutTLS
)
func getStreamListenerMode(c *criService) (streamListenerMode, error) {
if c.config.EnableTLSStreaming {
if c.config.X509KeyPairStreaming.TLSCertFile != "" && c.config.X509KeyPairStreaming.TLSKeyFile != "" {
return x509KeyPairTLS, nil
}
if c.config.X509KeyPairStreaming.TLSCertFile != "" && c.config.X509KeyPairStreaming.TLSKeyFile == "" {
return -1, errors.New("must set X509KeyPairStreaming.TLSKeyFile")
}
if c.config.X509KeyPairStreaming.TLSCertFile == "" && c.config.X509KeyPairStreaming.TLSKeyFile != "" {
return -1, errors.New("must set X509KeyPairStreaming.TLSCertFile")
}
return selfSignTLS, nil
}
if c.config.X509KeyPairStreaming.TLSCertFile != "" {
return -1, errors.New("X509KeyPairStreaming.TLSCertFile is set but EnableTLSStreaming is not set")
}
if c.config.X509KeyPairStreaming.TLSKeyFile != "" {
return -1, errors.New("X509KeyPairStreaming.TLSKeyFile is set but EnableTLSStreaming is not set")
}
return withoutTLS, nil
}
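A sketch of the three modes (the certificate paths are made-up example values):

```go
c.config.EnableTLSStreaming = true
c.config.X509KeyPairStreaming.TLSCertFile = "/etc/containerd/streaming.crt" // hypothetical path
c.config.X509KeyPairStreaming.TLSKeyFile = "/etc/containerd/streaming.key"  // hypothetical path
mode, _ := getStreamListenerMode(c) // x509KeyPairTLS: serve with the supplied key pair

c.config.X509KeyPairStreaming.TLSCertFile = ""
c.config.X509KeyPairStreaming.TLSKeyFile = ""
mode, _ = getStreamListenerMode(c) // selfSignTLS: generate a self-signed certificate

c.config.EnableTLSStreaming = false
mode, _ = getStreamListenerMode(c) // withoutTLS: plain streaming server
_ = mode
```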
func newStreamServer(c *criService, addr, port string) (streaming.Server, error) {
if addr == "" {
a, err := k8snet.ChooseBindAddress(nil)
@@ -44,8 +74,22 @@ func newStreamServer(c *criService, addr, port string) (streaming.Server, error)
}
config := streaming.DefaultConfig
config.Addr = net.JoinHostPort(addr, port)
runtime := newStreamRuntime(c)
if c.config.EnableTLSStreaming {
run := newStreamRuntime(c)
tlsMode, err := getStreamListenerMode(c)
if err != nil {
return nil, errors.Wrapf(err, "invalid stream server configuration")
}
switch tlsMode {
case x509KeyPairTLS:
tlsCert, err := tls.LoadX509KeyPair(c.config.X509KeyPairStreaming.TLSCertFile, c.config.X509KeyPairStreaming.TLSKeyFile)
if err != nil {
return nil, errors.Wrap(err, "failed to load x509 key pair for stream server")
}
config.TLSConfig = &tls.Config{
Certificates: []tls.Certificate{tlsCert},
}
return streaming.NewServer(config, run)
case selfSignTLS:
tlsCert, err := newTLSCert()
if err != nil {
return nil, errors.Wrap(err, "failed to generate tls certificate for stream server")
@@ -54,8 +98,12 @@ func newStreamServer(c *criService, addr, port string) (streaming.Server, error)
Certificates: []tls.Certificate{tlsCert},
InsecureSkipVerify: true,
}
return streaming.NewServer(config, run)
case withoutTLS:
return streaming.NewServer(config, run)
default:
return nil, errors.New("invalid configuration for the stream listener")
}
return streaming.NewServer(config, runtime)
}
type streamRuntime struct {

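The new stream-server code above boils down to a small decision table over `EnableTLSStreaming` and the `X509KeyPairStreaming` cert/key fields. The following self-contained sketch restates that table outside the plugin; names such as `pickStreamTLSMode` are invented for illustration only.

```go
package main

import (
	"errors"
	"fmt"
)

type streamTLSMode int

const (
	x509KeyPairTLS streamTLSMode = iota
	selfSignTLS
	withoutTLS
)

// pickStreamTLSMode reproduces the decision table above: an explicit cert/key
// pair selects x509KeyPairTLS, enabling TLS without a pair falls back to a
// self-signed certificate, partial pairs are rejected, and cert/key files
// without the enable flag are also rejected.
func pickStreamTLSMode(enableTLS bool, certFile, keyFile string) (streamTLSMode, error) {
	switch {
	case enableTLS && certFile != "" && keyFile != "":
		return x509KeyPairTLS, nil
	case enableTLS && certFile != "":
		return -1, errors.New("must set TLSKeyFile when TLSCertFile is set")
	case enableTLS && keyFile != "":
		return -1, errors.New("must set TLSCertFile when TLSKeyFile is set")
	case enableTLS:
		return selfSignTLS, nil
	case certFile != "" || keyFile != "":
		return -1, errors.New("TLS cert/key configured but TLS streaming is not enabled")
	default:
		return withoutTLS, nil
	}
}

func main() {
	mode, err := pickStreamTLSMode(true, "", "")
	fmt.Println(mode, err) // prints "1 <nil>", i.e. selfSignTLS
}
```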
View File

@@ -0,0 +1,34 @@
/*
Copyright 2018 The Containerd Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package image
import "github.com/pkg/errors"
// NewFakeStore returns an image store with predefined images.
// Update is not allowed for this fake store.
func NewFakeStore(images []Image) (*Store, error) {
s := NewStore(nil)
for _, i := range images {
for _, ref := range i.References {
s.refCache[ref] = i.ID
}
if err := s.store.add(i); err != nil {
return nil, errors.Wrapf(err, "add image %q", i)
}
}
return s, nil
}

View File

@@ -17,14 +17,21 @@ limitations under the License.
package image
import (
"context"
"encoding/json"
"sync"
"github.com/containerd/containerd"
"github.com/containerd/containerd/content"
"github.com/containerd/containerd/errdefs"
"github.com/docker/distribution/digestset"
godigest "github.com/opencontainers/go-digest"
imagedigest "github.com/opencontainers/go-digest"
imageidentity "github.com/opencontainers/image-spec/identity"
imagespec "github.com/opencontainers/image-spec/specs-go/v1"
"github.com/pkg/errors"
"github.com/containerd/cri/pkg/store"
storeutil "github.com/containerd/cri/pkg/store"
"github.com/containerd/cri/pkg/util"
)
// Image contains all resources associated with the image. All fields
@@ -32,10 +39,8 @@ import (
type Image struct {
// Id of the image. Normally the digest of image config.
ID string
// Other names by which this image is known.
RepoTags []string
// Digests by which this image is known.
RepoDigests []string
// References are references to the image, e.g. RepoTag and RepoDigest.
References []string
// ChainID is the chainID of the image.
ChainID string
// Size is the compressed size of the image.
@@ -48,28 +53,156 @@ type Image struct {
// Store stores all images.
type Store struct {
lock sync.RWMutex
// refCache is a containerd image reference to image id cache.
refCache map[string]string
// client is the containerd client.
client *containerd.Client
// store is the internal image store indexed by image id.
store *store
}
// NewStore creates an image store.
func NewStore(client *containerd.Client) *Store {
return &Store{
refCache: make(map[string]string),
client: client,
store: &store{
images: make(map[string]Image),
digestSet: digestset.NewSet(),
},
}
}
// Update updates cache for a reference.
func (s *Store) Update(ctx context.Context, ref string) error {
s.lock.Lock()
defer s.lock.Unlock()
i, err := s.client.GetImage(ctx, ref)
if err != nil && !errdefs.IsNotFound(err) {
return errors.Wrap(err, "get image from containerd")
}
var img *Image
if err == nil {
img, err = getImage(ctx, i)
if err != nil {
return errors.Wrap(err, "get image info from containerd")
}
}
return s.update(ref, img)
}
// update updates the internal cache. img == nil means that
// the image does not exist in containerd.
func (s *Store) update(ref string, img *Image) error {
oldID, oldExist := s.refCache[ref]
if img == nil {
// The image reference doesn't exist in containerd.
if oldExist {
// Remove the reference from the store.
s.store.delete(oldID, ref)
delete(s.refCache, ref)
}
return nil
}
if oldExist {
if oldID == img.ID {
return nil
}
// Updated. Remove tag from old image.
s.store.delete(oldID, ref)
}
// New image. Add new image.
s.refCache[ref] = img.ID
return s.store.add(*img)
}
// getImage gets image information from containerd.
func getImage(ctx context.Context, i containerd.Image) (*Image, error) {
// Get image information.
diffIDs, err := i.RootFS(ctx)
if err != nil {
return nil, errors.Wrap(err, "get image diffIDs")
}
chainID := imageidentity.ChainID(diffIDs)
size, err := i.Size(ctx)
if err != nil {
return nil, errors.Wrap(err, "get image compressed resource size")
}
desc, err := i.Config(ctx)
if err != nil {
return nil, errors.Wrap(err, "get image config descriptor")
}
id := desc.Digest.String()
rb, err := content.ReadBlob(ctx, i.ContentStore(), desc)
if err != nil {
return nil, errors.Wrap(err, "read image config from content store")
}
var ociimage imagespec.Image
if err := json.Unmarshal(rb, &ociimage); err != nil {
return nil, errors.Wrapf(err, "unmarshal image config %s", rb)
}
return &Image{
ID: id,
References: []string{i.Name()},
ChainID: chainID.String(),
Size: size,
ImageSpec: ociimage,
Image: i,
}, nil
}
// Resolve resolves an image reference to an image id.
func (s *Store) Resolve(ref string) (string, error) {
s.lock.RLock()
defer s.lock.RUnlock()
id, ok := s.refCache[ref]
if !ok {
return "", storeutil.ErrNotExist
}
return id, nil
}
// Get gets image metadata by image id. The id can be truncated.
// Returns various validation errors if the image id is invalid.
// Returns storeutil.ErrNotExist if the image doesn't exist.
func (s *Store) Get(id string) (Image, error) {
return s.store.get(id)
}
// List lists all images.
func (s *Store) List() []Image {
return s.store.list()
}
type store struct {
lock sync.RWMutex
images map[string]Image
digestSet *digestset.Set
}
// NewStore creates an image store.
func NewStore() *Store {
return &Store{
images: make(map[string]Image),
digestSet: digestset.NewSet(),
func (s *store) list() []Image {
s.lock.RLock()
defer s.lock.RUnlock()
var images []Image
for _, i := range s.images {
images = append(images, i)
}
return images
}
// Add an image into the store.
func (s *Store) Add(img Image) error {
func (s *store) add(img Image) error {
s.lock.Lock()
defer s.lock.Unlock()
if _, err := s.digestSet.Lookup(img.ID); err != nil {
if err != digestset.ErrDigestNotFound {
return err
}
if err := s.digestSet.Add(godigest.Digest(img.ID)); err != nil {
if err := s.digestSet.Add(imagedigest.Digest(img.ID)); err != nil {
return err
}
}
@@ -80,44 +213,29 @@ func (s *Store) Add(img Image) error {
s.images[img.ID] = img
return nil
}
// Or else, merge the repo tags/digests.
i.RepoTags = mergeStringSlices(i.RepoTags, img.RepoTags)
i.RepoDigests = mergeStringSlices(i.RepoDigests, img.RepoDigests)
// Or else, merge the references.
i.References = util.MergeStringSlices(i.References, img.References)
s.images[img.ID] = i
return nil
}
// Get returns the image with specified id. Returns store.ErrNotExist if the
// image doesn't exist.
func (s *Store) Get(id string) (Image, error) {
func (s *store) get(id string) (Image, error) {
s.lock.RLock()
defer s.lock.RUnlock()
digest, err := s.digestSet.Lookup(id)
if err != nil {
if err == digestset.ErrDigestNotFound {
err = store.ErrNotExist
err = storeutil.ErrNotExist
}
return Image{}, err
}
if i, ok := s.images[digest.String()]; ok {
return i, nil
}
return Image{}, store.ErrNotExist
return Image{}, storeutil.ErrNotExist
}
// List lists all images.
func (s *Store) List() []Image {
s.lock.RLock()
defer s.lock.RUnlock()
var images []Image
for _, i := range s.images {
images = append(images, i)
}
return images
}
// Delete deletes the image with specified id.
func (s *Store) Delete(id string) {
func (s *store) delete(id, ref string) {
s.lock.Lock()
defer s.lock.Unlock()
digest, err := s.digestSet.Lookup(id)
@@ -126,22 +244,16 @@ func (s *Store) Delete(id string) {
// So we need to return if there is an error.
return
}
i, ok := s.images[digest.String()]
if !ok {
return
}
i.References = util.SubtractStringSlice(i.References, ref)
if len(i.References) != 0 {
s.images[digest.String()] = i
return
}
// Remove the image if it is not referenced any more.
s.digestSet.Remove(digest) // nolint: errcheck
delete(s.images, digest.String())
}
// mergeStringSlices merges 2 string slices into one and remove duplicated elements.
func mergeStringSlices(a []string, b []string) []string {
set := map[string]struct{}{}
for _, s := range a {
set[s] = struct{}{}
}
for _, s := range b {
set[s] = struct{}{}
}
var ss []string
for s := range set {
ss = append(ss, s)
}
return ss
}
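A quick, test-style sketch of how the reworked reference-based store can be exercised through the fake store added above; the import path is assumed to be `github.com/containerd/cri/pkg/store/image`, and the digest and reference values are made-up examples.

```go
package main

import (
	"fmt"

	imagestore "github.com/containerd/cri/pkg/store/image"
)

func main() {
	// Hypothetical image: the ID must be a full digest so the internal
	// digest set accepts it; the reference is an arbitrary example.
	img := imagestore.Image{
		ID:         "sha256:e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
		References: []string{"docker.io/library/busybox:latest"},
	}

	s, err := imagestore.NewFakeStore([]imagestore.Image{img})
	if err != nil {
		panic(err)
	}

	// Resolve a reference to an image id, then fetch the metadata by id.
	id, err := s.Resolve("docker.io/library/busybox:latest")
	if err != nil {
		panic(err)
	}
	i, err := s.Get(id)
	if err != nil {
		panic(err)
	}
	fmt.Println(i.ID, i.References)
}
```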

View File

@@ -54,6 +54,8 @@ type Metadata struct {
NetNSPath string
// IP of Pod if it is attached to non host network
IP string
// RuntimeHandler is the runtime handler name of the pod.
RuntimeHandler string
}
// MarshalJSON encodes Metadata into bytes in json format.

View File

@@ -16,9 +16,14 @@ limitations under the License.
package util
import "github.com/docker/docker/pkg/stringid"
import (
"encoding/hex"
"math/rand"
)
// GenerateID generates a random unique id.
func GenerateID() string {
return stringid.GenerateNonCryptoID()
b := make([]byte, 32)
rand.Read(b)
return hex.EncodeToString(b)
}

View File

@@ -41,3 +41,19 @@ func SubtractStringSlice(ss []string, str string) []string {
}
return res
}
// MergeStringSlices merges 2 string slices into one and removes duplicated elements.
func MergeStringSlices(a []string, b []string) []string {
set := map[string]struct{}{}
for _, s := range a {
set[s] = struct{}{}
}
for _, s := range b {
set[s] = struct{}{}
}
var ss []string
for s := range set {
ss = append(ss, s)
}
return ss
}

View File

@@ -1,19 +1,19 @@
github.com/beorn7/perks 4c0e84591b9aa9e6dcfdf3e020114cd81f89d5f9
github.com/blang/semver v3.1.0
github.com/boltdb/bolt e9cf4fae01b5a8ff89d0ec6b32f0d9c9f79aefdd
github.com/boltdb/bolt v1.3.1
github.com/BurntSushi/toml a368813c5e648fee92e5f6c30e3944ff9d5e8895
github.com/containerd/cgroups 5e610833b72089b37d0e615de9a92dfc043757c2
github.com/containerd/console 4d8a41f4ce5b9bae77c41786ea2458330f43f081
github.com/containerd/containerd b9eeaa1ce83dd9970605ddbd0b35d4d3fa5f87bd
github.com/containerd/continuity d3c23511c1bf5851696cba83143d9cbcd666869b
github.com/containerd/console c12b1e7919c14469339a5d38f2f8ed9b64a9de23
github.com/containerd/containerd 1950f791d9225ffe061c77e74e292bcb3c428a04
github.com/containerd/continuity f44b615e492bdfb371aae2f76ec694d9da1db537
github.com/containerd/fifo 3d5202aec260678c48179c56f40e6f38a095738c
github.com/containerd/go-cni 5882530828ecf62032409b298a3e8b19e08b6534
github.com/containerd/go-runc edcf3de1f4971445c42d61f20d506b30612aa031
github.com/containerd/go-cni 6d7b509a054a3cb1c35ed1865d4fde2f0cb547cd
github.com/containerd/go-runc 5a6d9f37cfa36b15efba46dc7ea349fa9b7143c3
github.com/containerd/ttrpc 94dde388801693c54f88a6596f713b51a8b30b2d
github.com/containerd/typeurl a93fcdb778cd272c6e9b3028b2f42d813e785d40
github.com/containernetworking/cni v0.6.0
github.com/containernetworking/plugins v0.7.0
github.com/coreos/go-systemd 48702e0da86bd25e76cfef347e2adeb434a0d0a6
github.com/coreos/go-systemd v14
github.com/davecgh/go-spew v1.1.0
github.com/docker/distribution b38e5838b7b2f2ad48e06ec4b500011976080621
github.com/docker/docker 86f080cff0914e9694068ed78d503701667c4c00
@@ -21,29 +21,29 @@ github.com/docker/go-events 9461782956ad83b30282bf90e31fa6a70c255ba9
github.com/docker/go-metrics 4ea375f7759c82740c893fc030bc37088d2ec098
github.com/docker/go-units v0.3.1
github.com/docker/spdystream 449fdfce4d962303d702fec724ef0ad181c92528
github.com/emicklei/go-restful ff4f55a206334ef123e4f79bbf348980da81ca46
github.com/ghodss/yaml 73d445a93680fa1a78ae23a5839bad48f32ba1ee
github.com/godbus/dbus c7fdd8b5cd55e87b4e1f4e372cdb1db61dd6c66f
github.com/emicklei/go-restful v2.2.1
github.com/ghodss/yaml v1.0.0
github.com/godbus/dbus v3
github.com/gogo/googleapis 08a7655d27152912db7aaf4f983275eaf8d128ef
github.com/gogo/protobuf v1.0.0
github.com/golang/glog 44145f04b68cf362d9c4df2182967c2275eaefed
github.com/golang/protobuf v1.1.0
github.com/google/gofuzz 44d81051d367757e1c7c6a5a86423ece9afcf63c
github.com/grpc-ecosystem/go-grpc-prometheus 6b7015e65d366bf3f19b2b2a000a831940f0f7e0
github.com/grpc-ecosystem/go-grpc-prometheus v1.1
github.com/hashicorp/errwrap 7554cd9344cec97297fa6649b055a8c98c2a1e55
github.com/hashicorp/go-multierror ed905158d87462226a13fe39ddf685ea65f1c11f
github.com/json-iterator/go f2b4162afba35581b6d4a50d3b8f34e33c144682
github.com/json-iterator/go 1.1.5
github.com/matttproud/golang_protobuf_extensions v1.0.0
github.com/Microsoft/go-winio v0.4.7
github.com/Microsoft/hcsshim v0.6.11
github.com/Microsoft/go-winio v0.4.10
github.com/Microsoft/hcsshim 44c060121b68e8bdc40b411beba551f3b4ee9e55
github.com/modern-go/concurrent 1.0.3
github.com/modern-go/reflect2 05fbef0ca5da472bbf96c9322b84a53edc03c9fd
github.com/modern-go/reflect2 1.0.1
github.com/opencontainers/go-digest c9281466c8b2f606084ac71339773efd177436e7
github.com/opencontainers/image-spec v1.0.1
github.com/opencontainers/runc 69663f0bd4b60df09991c08812a60108003fa340
github.com/opencontainers/runc 20aff4f0488c6d4b8df4d85b4f63f1f704c11abd
github.com/opencontainers/runtime-spec d810dbc60d8c5aeeb3d054bd1132fab2121968ce
github.com/opencontainers/runtime-tools v0.6.0
github.com/opencontainers/selinux 4a2974bf1ee960774ffd517717f1f45325af0206
github.com/opencontainers/selinux b6fa367ed7f534f9ba25391cc2d467085dbb445a
github.com/pkg/errors v0.8.0
github.com/pmezard/go-difflib v1.0.0
github.com/prometheus/client_golang f4fb1b73fb099f396a7f0036bf86aa8def4ed823
@@ -54,13 +54,14 @@ github.com/seccomp/libseccomp-golang 32f571b70023028bd57d9288c20efbcb237f3ce0
github.com/sirupsen/logrus v1.0.0
github.com/stretchr/testify v1.1.4
github.com/syndtr/gocapability db04d3cc01c8b54962a58ec7e491717d06cfcc16
github.com/tchap/go-patricia 5ad6cdb7538b0097d5598c7e57f0a24072adf7dc
github.com/tchap/go-patricia v2.2.6
github.com/urfave/cli 7bc6a0acffa589f415f88aca16cc1de5ffd66f9c
github.com/xeipuuv/gojsonpointer 4e3ac2762d5f479393488629ee9370b50873b3a6
github.com/xeipuuv/gojsonreference bd5ef7bd5415a7ac448318e64f11a24cd21e594b
github.com/xeipuuv/gojsonschema 1d523034197ff1f222f6429836dd36a2457a1874
golang.org/x/crypto 49796115aa4b964c318aad4f3084fdb41e9aa067
golang.org/x/net b3756b4b77d7b13260a0a2ec658753cf48922eac
golang.org/x/oauth2 a6bd8cefa1811bd24b86f8902872e4e8225f74c4
golang.org/x/sync 450f422ab23cf9881c94e2db30cac0eb1b7cf80c
golang.org/x/sys 1b2967e3c290b7c545b3db0deeda16e9be4f98a2 https://github.com/golang/sys
golang.org/x/text 19e51611da83d6be54ddafce4a4af510cb3e9ea4
@@ -69,9 +70,9 @@ google.golang.org/genproto d80a6e20e776b0b17a324d0ba1ab50a39c8e8944
google.golang.org/grpc v1.12.0
gopkg.in/inf.v0 3887ee99ecf07df5b447e9b00d9c0b2adaa9f3e4
gopkg.in/yaml.v2 53feefa2559fb8dfa8d81baad31be332c97d6c77
k8s.io/api 9e5ffd1f1320950b238cfce291b926411f0af722
k8s.io/apimachinery ed135c5b96450fd24e5e981c708114fbbd950697
k8s.io/apiserver a90e3a95c2e91b944bfca8225c4e0d12e42a9eb5
k8s.io/client-go 03bfb9bdcfe5482795b999f39ca3ed9ad42ce5bb
k8s.io/kubernetes v1.11.0
k8s.io/utils 733eca437aa39379e4bcc25e726439dfca40fcff
k8s.io/api 012f271b5d41baad56190c5f1ae19bff16df0fd8
k8s.io/apimachinery 6429050ef506887d121f3e7306e894f8900d8a63
k8s.io/apiserver e9312c15296b6c2c923ebd5031ff5d1d5fd022d7
k8s.io/client-go 37c3c02ec96533daec0dbda1f39a6b1d68505c79
k8s.io/kubernetes v1.12.0-beta.1
k8s.io/utils 982821ea41da7e7c15f3d3738921eb2e7e241ccd

View File

@@ -136,19 +136,37 @@ func WithConfListFile(fileName string) CNIOpt {
if err != nil {
return err
}
i := len(c.networks)
c.networks = append(c.networks, &Network{
cni: c.cniConfig,
config: confList,
ifName: getIfName(c.prefix, 0),
ifName: getIfName(c.prefix, i),
})
return nil
}
}
// WithDefaultConf can be used to detect network config
// WithDefaultConf can be used to detect the default network
// config file from the configured cni config directory and load
// it.
// Since the CNI spec does not specify a way to detect default networks,
// the convention chosen is to treat the first network configuration in the
// sorted list of network conf files as the default network.
func WithDefaultConf(c *libcni) error {
return loadFromConfDir(c, 1)
}
// WithAllConf can be used to detect all network config
// files from the configured cni config directory and load
// them.
func WithDefaultConf(c *libcni) error {
func WithAllConf(c *libcni) error {
return loadFromConfDir(c, 0)
}
// loadFromConfDir detects network config files from the
// configured cni config directory and loads them. max is
// the maximum number of network configs to load (max <= 0 means no limit).
func loadFromConfDir(c *libcni, max int) error {
files, err := cnilibrary.ConfFiles(c.pluginConfDir, []string{".conf", ".conflist", ".json"})
switch {
case err != nil:
@@ -166,6 +184,7 @@ func WithDefaultConf(c *libcni) error {
// interface provided during init as the network interface for this default
// network. For every other network use a generated interface id.
i := 0
var networks []*Network
for _, confFile := range files {
var confList *cnilibrary.NetworkConfigList
if strings.HasSuffix(confFile, ".conflist") {
@@ -193,15 +212,19 @@ func WithDefaultConf(c *libcni) error {
return errors.Wrapf(ErrInvalidConfig, "CNI config list %s has no networks, skipping", confFile)
}
c.networks = append(c.networks, &Network{
networks = append(networks, &Network{
cni: c.cniConfig,
config: confList,
ifName: getIfName(c.prefix, i),
})
i++
if i == max {
break
}
}
if len(c.networks) == 0 {
if len(networks) == 0 {
return errors.Wrapf(ErrCNINotInitialized, "no valid networks found in %s", c.pluginDirs)
}
c.networks = append(c.networks, networks...)
return nil
}
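To make the "first config in sorted order is the default network" convention concrete, here is a small standalone sketch (not go-cni itself) that selects a default config file the same way; the directory path in `main` is only an example.

```go
package main

import (
	"fmt"
	"path/filepath"
	"sort"
)

// defaultConfFile gathers the CNI config files in confDir (the same
// extensions the loader above accepts), sorts them, and treats the first
// one as the default network configuration.
func defaultConfFile(confDir string) (string, error) {
	var files []string
	for _, pattern := range []string{"*.conf", "*.conflist", "*.json"} {
		matches, err := filepath.Glob(filepath.Join(confDir, pattern))
		if err != nil {
			return "", err
		}
		files = append(files, matches...)
	}
	if len(files) == 0 {
		return "", fmt.Errorf("no CNI config files found in %s", confDir)
	}
	sort.Strings(files)
	return files[0], nil
}

func main() {
	f, err := defaultConfFile("/etc/cni/net.d")
	if err != nil {
		fmt.Println(err)
		return
	}
	fmt.Println("default network config:", f)
}
```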

View File

@@ -1 +0,0 @@
This package provides helper functions for dealing with string identifiers

View File

@@ -1,99 +0,0 @@
// Package stringid provides helper functions for dealing with string identifiers
package stringid
import (
cryptorand "crypto/rand"
"encoding/hex"
"fmt"
"io"
"math"
"math/big"
"math/rand"
"regexp"
"strconv"
"strings"
"time"
)
const shortLen = 12
var (
validShortID = regexp.MustCompile("^[a-f0-9]{12}$")
validHex = regexp.MustCompile(`^[a-f0-9]{64}$`)
)
// IsShortID determines if an arbitrary string *looks like* a short ID.
func IsShortID(id string) bool {
return validShortID.MatchString(id)
}
// TruncateID returns a shorthand version of a string identifier for convenience.
// A collision with other shorthands is very unlikely, but possible.
// In case of a collision a lookup with TruncIndex.Get() will fail, and the caller
// will need to use a longer prefix, or the full-length Id.
func TruncateID(id string) string {
if i := strings.IndexRune(id, ':'); i >= 0 {
id = id[i+1:]
}
if len(id) > shortLen {
id = id[:shortLen]
}
return id
}
func generateID(r io.Reader) string {
b := make([]byte, 32)
for {
if _, err := io.ReadFull(r, b); err != nil {
panic(err) // This shouldn't happen
}
id := hex.EncodeToString(b)
// if we try to parse the truncated form as an int and we don't have
// an error then the value is all numeric and causes issues when
// used as a hostname. ref #3869
if _, err := strconv.ParseInt(TruncateID(id), 10, 64); err == nil {
continue
}
return id
}
}
// GenerateRandomID returns a unique id.
func GenerateRandomID() string {
return generateID(cryptorand.Reader)
}
// GenerateNonCryptoID generates unique id without using cryptographically
// secure sources of random.
// It helps you to save entropy.
func GenerateNonCryptoID() string {
return generateID(readerFunc(rand.Read))
}
// ValidateID checks whether an ID string is a valid image ID.
func ValidateID(id string) error {
if ok := validHex.MatchString(id); !ok {
return fmt.Errorf("image ID %q is invalid", id)
}
return nil
}
func init() {
// safely set the seed globally so we generate random ids. Tries to use a
// crypto seed before falling back to time.
var seed int64
if cryptoseed, err := cryptorand.Int(cryptorand.Reader, big.NewInt(math.MaxInt64)); err != nil {
// This should not happen, but worst-case fallback to time-based seed.
seed = time.Now().UnixNano()
} else {
seed = cryptoseed.Int64()
}
rand.Seed(seed)
}
type readerFunc func(p []byte) (int, error)
func (fn readerFunc) Read(p []byte) (int, error) {
return fn(p)
}

View File

@@ -67,8 +67,9 @@ func (u UserResource) findUser(request *restful.Request, response *restful.Respo
- [Example posted on blog](http://ernestmicklei.com/2012/11/go-restful-first-working-example/)
- [Design explained on blog](http://ernestmicklei.com/2012/11/go-restful-api-design/)
- [sourcegraph](https://sourcegraph.com/github.com/emicklei/go-restful)
- [showcase: Zazkia - tcp proxy for testing resiliency](https://github.com/emicklei/zazkia)
- [showcase: Mora - MongoDB REST Api server](https://github.com/emicklei/mora)
Type ```git shortlog -s``` for a full list of contributors.
© 2012 - 2017, http://ernestmicklei.com. MIT License. Contributions are welcome.
© 2012 - 2017, http://ernestmicklei.com. MIT License. Contributions are welcome.

View File

@@ -15,20 +15,20 @@ type CompressorProvider interface {
// Before using it, call Reset().
AcquireGzipWriter() *gzip.Writer
// Releases an aqcuired *gzip.Writer.
// Releases an acquired *gzip.Writer.
ReleaseGzipWriter(w *gzip.Writer)
// Returns a *gzip.Reader which needs to be released later.
AcquireGzipReader() *gzip.Reader
// Releases an aqcuired *gzip.Reader.
// Releases an acquired *gzip.Reader.
ReleaseGzipReader(w *gzip.Reader)
// Returns a *zlib.Writer which needs to be released later.
// Before using it, call Reset().
AcquireZlibWriter() *zlib.Writer
// Releases an aqcuired *zlib.Writer.
// Releases an acquired *zlib.Writer.
ReleaseZlibWriter(w *zlib.Writer)
}
@@ -45,7 +45,7 @@ func CurrentCompressorProvider() CompressorProvider {
return currentCompressorProvider
}
// CompressorProvider sets the actual provider of compressors (zlib or gzip).
// SetCompressorProvider sets the actual provider of compressors (zlib or gzip).
func SetCompressorProvider(p CompressorProvider) {
if p == nil {
panic("cannot set compressor provider to nil")

View File

@@ -140,7 +140,7 @@ func (c *Container) addHandler(service *WebService, serveMux *http.ServeMux) boo
func (c *Container) Remove(ws *WebService) error {
if c.ServeMux == http.DefaultServeMux {
errMsg := fmt.Sprintf("[restful] cannot remove a WebService from a Container using the DefaultServeMux: ['%v']", ws)
log.Printf(errMsg)
log.Print(errMsg)
return errors.New(errMsg)
}
c.webServicesLock.Lock()

View File

@@ -21,7 +21,7 @@ func TraceLogger(logger log.StdLogger) {
EnableTracing(logger != nil)
}
// expose the setter for the global logger on the top-level package
// SetLogger exposes the setter for the global logger on the top-level package
func SetLogger(customLogger log.StdLogger) {
log.SetLogger(customLogger)
}

View File

@@ -21,7 +21,7 @@ type Response struct {
http.ResponseWriter
requestAccept string // mime-type what the Http Request says it wants to receive
routeProduces []string // mime-types what the Route says it can produce
statusCode int // HTTP status code that has been written explicity (if zero then net/http has written 200)
statusCode int // HTTP status code that has been written explicitly (if zero then net/http has written 200)
contentLength int // number of bytes written for the response body
prettyPrint bool // controls the indentation feature of XML and JSON serialization. It is initialized using var PrettyPrintResponses.
err error // err property is kept when WriteError is called

View File

@@ -100,7 +100,7 @@ func (r Route) matchesContentType(mimeTypes string) bool {
}
if len(mimeTypes) == 0 {
// idempotent methods with (most-likely or garanteed) empty content match missing Content-Type
// idempotent methods with (most-likely or guaranteed) empty content match missing Content-Type
m := r.Method
if m == "GET" || m == "HEAD" || m == "OPTIONS" || m == "DELETE" || m == "TRACE" {
return true

View File

@@ -89,7 +89,7 @@ func (b *RouteBuilder) Doc(documentation string) *RouteBuilder {
return b
}
// A verbose explanation of the operation behavior. Optional.
// Notes is a verbose explanation of the operation behavior. Optional.
func (b *RouteBuilder) Notes(notes string) *RouteBuilder {
b.notes = notes
return b

View File

@@ -233,7 +233,7 @@ func (w *WebService) RootPath() string {
return w.rootPath
}
// PathParameters return the path parameter names for (shared amoung its Routes)
// PathParameters return the path parameter names for (shared among its Routes)
func (w *WebService) PathParameters() []*Parameter {
return w.pathParameters
}

View File

@@ -4,13 +4,13 @@
## Introduction
A wrapper around [go-yaml](https://github.com/go-yaml/yaml) designed to enable a better way of handling YAML when marshaling to and from structs.
A wrapper around [go-yaml](https://github.com/go-yaml/yaml) designed to enable a better way of handling YAML when marshaling to and from structs.
In short, this library first converts YAML to JSON using go-yaml and then uses `json.Marshal` and `json.Unmarshal` to convert to or from the struct. This means that it effectively reuses the JSON struct tags as well as the custom JSON methods `MarshalJSON` and `UnmarshalJSON` unlike go-yaml. For a detailed overview of the rationale behind this method, [see this blog post](http://ghodss.com/2014/the-right-way-to-handle-yaml-in-golang/).
## Compatibility
This package uses [go-yaml v2](https://github.com/go-yaml/yaml) and therefore supports [everything go-yaml supports](https://github.com/go-yaml/yaml#compatibility).
This package uses [go-yaml](https://github.com/go-yaml/yaml) and therefore supports [everything go-yaml supports](https://github.com/go-yaml/yaml#compatibility).
## Caveats
@@ -44,6 +44,8 @@ import "github.com/ghodss/yaml"
Usage is very similar to the JSON library:
```go
package main
import (
"fmt"
@@ -51,8 +53,8 @@ import (
)
type Person struct {
Name string `json:"name"` // Affects YAML field names too.
Age int `json:"name"`
Name string `json:"name"` // Affects YAML field names too.
Age int `json:"age"`
}
func main() {
@@ -65,13 +67,13 @@ func main() {
}
fmt.Println(string(y))
/* Output:
name: John
age: 30
name: John
*/
// Unmarshal the YAML back into a Person struct.
var p2 Person
err := yaml.Unmarshal(y, &p2)
err = yaml.Unmarshal(y, &p2)
if err != nil {
fmt.Printf("err: %v\n", err)
return
@@ -86,11 +88,14 @@ func main() {
`yaml.YAMLToJSON` and `yaml.JSONToYAML` methods are also available:
```go
package main
import (
"fmt"
"github.com/ghodss/yaml"
)
func main() {
j := []byte(`{"name": "John", "age": 30}`)
y, err := yaml.JSONToYAML(j)

View File

@@ -45,7 +45,11 @@ func indirect(v reflect.Value, decodingNull bool) (json.Unmarshaler, encoding.Te
break
}
if v.IsNil() {
v.Set(reflect.New(v.Type().Elem()))
if v.CanSet() {
v.Set(reflect.New(v.Type().Elem()))
} else {
v = reflect.New(v.Type().Elem())
}
}
if v.Type().NumMethod() > 0 {
if u, ok := v.Interface().(json.Unmarshaler); ok {

View File

@@ -15,12 +15,12 @@ import (
func Marshal(o interface{}) ([]byte, error) {
j, err := json.Marshal(o)
if err != nil {
return nil, fmt.Errorf("error marshaling into JSON: ", err)
return nil, fmt.Errorf("error marshaling into JSON: %v", err)
}
y, err := JSONToYAML(j)
if err != nil {
return nil, fmt.Errorf("error converting JSON to YAML: ", err)
return nil, fmt.Errorf("error converting JSON to YAML: %v", err)
}
return y, nil
@@ -48,7 +48,7 @@ func JSONToYAML(j []byte) ([]byte, error) {
var jsonObj interface{}
// We are using yaml.Unmarshal here (instead of json.Unmarshal) because the
// Go JSON library doesn't try to pick the right number type (int, float,
// etc.) when unmarshling to interface{}, it just picks float64
// etc.) when unmarshalling to interface{}, it just picks float64
// universally. go-yaml does go through the effort of picking the right
// number type, so we can preserve number type throughout this process.
err := yaml.Unmarshal(j, &jsonObj)

View File

@@ -81,10 +81,12 @@ func (adapter *Decoder) More() bool {
if iter.Error != nil {
return false
}
if iter.head != iter.tail {
return true
c := iter.nextToken()
if c == 0 {
return false
}
return iter.loadMore()
iter.unreadByte()
return c != ']' && c != '}'
}
// Buffered remaining buffer
@@ -98,7 +100,7 @@ func (adapter *Decoder) Buffered() io.Reader {
func (adapter *Decoder) UseNumber() {
cfg := adapter.iter.cfg.configBeforeFrozen
cfg.UseNumber = true
adapter.iter.cfg = cfg.frozeWithCacheReuse()
adapter.iter.cfg = cfg.frozeWithCacheReuse(adapter.iter.cfg.extraExtensions)
}
// DisallowUnknownFields causes the Decoder to return an error when the destination
@@ -107,7 +109,7 @@ func (adapter *Decoder) UseNumber() {
func (adapter *Decoder) DisallowUnknownFields() {
cfg := adapter.iter.cfg.configBeforeFrozen
cfg.DisallowUnknownFields = true
adapter.iter.cfg = cfg.frozeWithCacheReuse()
adapter.iter.cfg = cfg.frozeWithCacheReuse(adapter.iter.cfg.extraExtensions)
}
// NewEncoder same as json.NewEncoder
@@ -132,14 +134,14 @@ func (adapter *Encoder) Encode(val interface{}) error {
func (adapter *Encoder) SetIndent(prefix, indent string) {
config := adapter.stream.cfg.configBeforeFrozen
config.IndentionStep = len(indent)
adapter.stream.cfg = config.frozeWithCacheReuse()
adapter.stream.cfg = config.frozeWithCacheReuse(adapter.stream.cfg.extraExtensions)
}
// SetEscapeHTML escape html by default, set to false to disable
func (adapter *Encoder) SetEscapeHTML(escapeHTML bool) {
config := adapter.stream.cfg.configBeforeFrozen
config.EscapeHTML = escapeHTML
adapter.stream.cfg = config.frozeWithCacheReuse()
adapter.stream.cfg = config.frozeWithCacheReuse(adapter.stream.cfg.extraExtensions)
}
// Valid reports whether data is a valid JSON encoding.

View File

@@ -74,7 +74,9 @@ type frozenConfig struct {
disallowUnknownFields bool
decoderCache *concurrent.Map
encoderCache *concurrent.Map
extensions []Extension
encoderExtension Extension
decoderExtension Extension
extraExtensions []Extension
streamPool *sync.Pool
iteratorPool *sync.Pool
caseSensitive bool
@@ -158,22 +160,21 @@ func (cfg Config) Froze() API {
if cfg.ValidateJsonRawMessage {
api.validateJsonRawMessage(encoderExtension)
}
if len(encoderExtension) > 0 {
api.extensions = append(api.extensions, encoderExtension)
}
if len(decoderExtension) > 0 {
api.extensions = append(api.extensions, decoderExtension)
}
api.encoderExtension = encoderExtension
api.decoderExtension = decoderExtension
api.configBeforeFrozen = cfg
return api
}
func (cfg Config) frozeWithCacheReuse() *frozenConfig {
func (cfg Config) frozeWithCacheReuse(extraExtensions []Extension) *frozenConfig {
api := getFrozenConfigFromCache(cfg)
if api != nil {
return api
}
api = cfg.Froze().(*frozenConfig)
for _, extension := range extraExtensions {
api.RegisterExtension(extension)
}
addFrozenConfigToCache(cfg, api)
return api
}
@@ -190,7 +191,7 @@ func (cfg *frozenConfig) validateJsonRawMessage(extension EncoderExtension) {
stream.WriteRaw(string(rawMessage))
}
}, func(ptr unsafe.Pointer) bool {
return false
return len(*((*json.RawMessage)(ptr))) == 0
}}
extension[reflect2.TypeOfPtr((*json.RawMessage)(nil)).Elem()] = encoder
extension[reflect2.TypeOfPtr((*RawMessage)(nil)).Elem()] = encoder
@@ -219,7 +220,9 @@ func (cfg *frozenConfig) getTagKey() string {
}
func (cfg *frozenConfig) RegisterExtension(extension Extension) {
cfg.extensions = append(cfg.extensions, extension)
cfg.extraExtensions = append(cfg.extraExtensions, extension)
copied := cfg.configBeforeFrozen
cfg.configBeforeFrozen = copied
}
type lossyFloat32Encoder struct {
@@ -314,7 +317,7 @@ func (cfg *frozenConfig) MarshalIndent(v interface{}, prefix, indent string) ([]
}
newCfg := cfg.configBeforeFrozen
newCfg.IndentionStep = len(indent)
return newCfg.frozeWithCacheReuse().Marshal(v)
return newCfg.frozeWithCacheReuse(cfg.extraExtensions).Marshal(v)
}
func (cfg *frozenConfig) UnmarshalFromString(str string, v interface{}) error {

View File

@@ -2,7 +2,7 @@ package jsoniter
import (
"fmt"
"unicode"
"strings"
)
// ReadObject read one field from object.
@@ -96,13 +96,12 @@ func (iter *Iterator) readFieldHash() int64 {
}
func calcHash(str string, caseSensitive bool) int64 {
if !caseSensitive {
str = strings.ToLower(str)
}
hash := int64(0x811c9dc5)
for _, b := range str {
if caseSensitive {
hash ^= int64(b)
} else {
hash ^= int64(unicode.ToLower(b))
}
for _, b := range []byte(str) {
hash ^= int64(b)
hash *= 0x1000193
}
return int64(hash)

View File

@@ -120,7 +120,8 @@ func decoderOfType(ctx *ctx, typ reflect2.Type) ValDecoder {
for _, extension := range extensions {
decoder = extension.DecorateDecoder(typ, decoder)
}
for _, extension := range ctx.extensions {
decoder = ctx.decoderExtension.DecorateDecoder(typ, decoder)
for _, extension := range ctx.extraExtensions {
decoder = extension.DecorateDecoder(typ, decoder)
}
return decoder
@@ -222,7 +223,8 @@ func encoderOfType(ctx *ctx, typ reflect2.Type) ValEncoder {
for _, extension := range extensions {
encoder = extension.DecorateEncoder(typ, encoder)
}
for _, extension := range ctx.extensions {
encoder = ctx.encoderExtension.DecorateEncoder(typ, encoder)
for _, extension := range ctx.extraExtensions {
encoder = extension.DecorateEncoder(typ, encoder)
}
return encoder

View File

@@ -246,7 +246,8 @@ func getTypeDecoderFromExtension(ctx *ctx, typ reflect2.Type) ValDecoder {
for _, extension := range extensions {
decoder = extension.DecorateDecoder(typ, decoder)
}
for _, extension := range ctx.extensions {
decoder = ctx.decoderExtension.DecorateDecoder(typ, decoder)
for _, extension := range ctx.extraExtensions {
decoder = extension.DecorateDecoder(typ, decoder)
}
}
@@ -259,14 +260,18 @@ func _getTypeDecoderFromExtension(ctx *ctx, typ reflect2.Type) ValDecoder {
return decoder
}
}
for _, extension := range ctx.extensions {
decoder := ctx.decoderExtension.CreateDecoder(typ)
if decoder != nil {
return decoder
}
for _, extension := range ctx.extraExtensions {
decoder := extension.CreateDecoder(typ)
if decoder != nil {
return decoder
}
}
typeName := typ.String()
decoder := typeDecoders[typeName]
decoder = typeDecoders[typeName]
if decoder != nil {
return decoder
}
@@ -286,7 +291,8 @@ func getTypeEncoderFromExtension(ctx *ctx, typ reflect2.Type) ValEncoder {
for _, extension := range extensions {
encoder = extension.DecorateEncoder(typ, encoder)
}
for _, extension := range ctx.extensions {
encoder = ctx.encoderExtension.DecorateEncoder(typ, encoder)
for _, extension := range ctx.extraExtensions {
encoder = extension.DecorateEncoder(typ, encoder)
}
}
@@ -300,14 +306,18 @@ func _getTypeEncoderFromExtension(ctx *ctx, typ reflect2.Type) ValEncoder {
return encoder
}
}
for _, extension := range ctx.extensions {
encoder := ctx.encoderExtension.CreateEncoder(typ)
if encoder != nil {
return encoder
}
for _, extension := range ctx.extraExtensions {
encoder := extension.CreateEncoder(typ)
if encoder != nil {
return encoder
}
}
typeName := typ.String()
encoder := typeEncoders[typeName]
encoder = typeEncoders[typeName]
if encoder != nil {
return encoder
}
@@ -393,7 +403,9 @@ func createStructDescriptor(ctx *ctx, typ reflect2.Type, bindings []*Binding, em
for _, extension := range extensions {
extension.UpdateStructDescriptor(structDescriptor)
}
for _, extension := range ctx.extensions {
ctx.encoderExtension.UpdateStructDescriptor(structDescriptor)
ctx.decoderExtension.UpdateStructDescriptor(structDescriptor)
for _, extension := range ctx.extraExtensions {
extension.UpdateStructDescriptor(structDescriptor)
}
processTags(structDescriptor, ctx.frozenConfig)

View File

@@ -39,7 +39,11 @@ func encoderOfMap(ctx *ctx, typ reflect2.Type) ValEncoder {
}
func decoderOfMapKey(ctx *ctx, typ reflect2.Type) ValDecoder {
for _, extension := range ctx.extensions {
decoder := ctx.decoderExtension.CreateMapKeyDecoder(typ)
if decoder != nil {
return decoder
}
for _, extension := range ctx.extraExtensions {
decoder := extension.CreateMapKeyDecoder(typ)
if decoder != nil {
return decoder
@@ -77,7 +81,11 @@ func decoderOfMapKey(ctx *ctx, typ reflect2.Type) ValDecoder {
}
func encoderOfMapKey(ctx *ctx, typ reflect2.Type) ValEncoder {
for _, extension := range ctx.extensions {
encoder := ctx.encoderExtension.CreateMapKeyEncoder(typ)
if encoder != nil {
return encoder
}
for _, extension := range ctx.extraExtensions {
encoder := extension.CreateMapKeyEncoder(typ)
if encoder != nil {
return encoder

View File

@@ -150,6 +150,9 @@ func (cfg *frozenConfig) TypeOf(obj interface{}) Type {
}
func (cfg *frozenConfig) Type2(type1 reflect.Type) Type {
if type1 == nil {
return nil
}
cacheKey := uintptr(unpackEFace(type1).data)
typeObj, found := cfg.cache.Load(cacheKey)
if found {

View File

@@ -49,8 +49,10 @@ func InitLabels(options []string) (string, string, error) {
mcon[con[0]] = con[1]
}
}
_ = ReleaseLabel(processLabel)
processLabel = pcon.Get()
mountLabel = mcon.Get()
_ = ReserveLabel(processLabel)
}
return processLabel, mountLabel, nil
}
@@ -85,9 +87,6 @@ func FormatMountLabel(src, mountLabel string) string {
// SetProcessLabel takes a process label and tells the kernel to assign the
// label to the next program executed by the current process.
func SetProcessLabel(processLabel string) error {
if processLabel == "" {
return nil
}
return selinux.SetExecLabel(processLabel)
}
@@ -131,7 +130,7 @@ func Relabel(path string, fileLabel string, shared bool) error {
return nil
}
exclude_paths := map[string]bool{"/": true, "/usr": true, "/etc": true}
exclude_paths := map[string]bool{"/": true, "/usr": true, "/etc": true, "/tmp": true, "/home": true, "/run": true, "/var": true, "/root": true}
if exclude_paths[path] {
return fmt.Errorf("SELinux relabeling of %s is not allowed", path)
}

View File

@@ -1,13 +1,16 @@
// +build linux
// +build selinux,linux
package selinux
import (
"bufio"
"bytes"
"crypto/rand"
"encoding/binary"
"errors"
"fmt"
"io"
"io/ioutil"
"os"
"path/filepath"
"regexp"
@@ -23,14 +26,16 @@ const (
// Permissive constant to indicate SELinux is in permissive mode
Permissive = 0
// Disabled constant to indicate SELinux is disabled
Disabled = -1
Disabled = -1
selinuxDir = "/etc/selinux/"
selinuxConfig = selinuxDir + "config"
selinuxfsMount = "/sys/fs/selinux"
selinuxTypeTag = "SELINUXTYPE"
selinuxTag = "SELINUX"
selinuxPath = "/sys/fs/selinux"
xattrNameSelinux = "security.selinux"
stRdOnly = 0x01
selinuxfsMagic = 0xf97cff8c
)
type selinuxState struct {
@@ -43,7 +48,13 @@ type selinuxState struct {
}
var (
// ErrMCSAlreadyExists is returned when trying to allocate a duplicate MCS.
ErrMCSAlreadyExists = errors.New("MCS label already exists")
// ErrEmptyPath is returned when an empty path has been specified.
ErrEmptyPath = errors.New("empty path")
assignRegex = regexp.MustCompile(`^([^=]+)=(.*)$`)
roFileLabel string
state = selinuxState{
mcsList: make(map[string]bool),
}
@@ -91,6 +102,83 @@ func (s *selinuxState) setSELinuxfs(selinuxfs string) string {
return s.selinuxfs
}
func verifySELinuxfsMount(mnt string) bool {
var buf syscall.Statfs_t
for {
err := syscall.Statfs(mnt, &buf)
if err == nil {
break
}
if err == syscall.EAGAIN {
continue
}
return false
}
if uint32(buf.Type) != uint32(selinuxfsMagic) {
return false
}
if (buf.Flags & stRdOnly) != 0 {
return false
}
return true
}
func findSELinuxfs() string {
// fast path: check the default mount first
if verifySELinuxfsMount(selinuxfsMount) {
return selinuxfsMount
}
// check if selinuxfs is available before going the slow path
fs, err := ioutil.ReadFile("/proc/filesystems")
if err != nil {
return ""
}
if !bytes.Contains(fs, []byte("\tselinuxfs\n")) {
return ""
}
// slow path: try to find among the mounts
f, err := os.Open("/proc/self/mountinfo")
if err != nil {
return ""
}
defer f.Close()
scanner := bufio.NewScanner(f)
for {
mnt := findSELinuxfsMount(scanner)
if mnt == "" { // error or not found
return ""
}
if verifySELinuxfsMount(mnt) {
return mnt
}
}
}
// findSELinuxfsMount returns the next selinuxfs mount point found,
// if there is one, or an empty string in case of EOF or error.
func findSELinuxfsMount(s *bufio.Scanner) string {
for s.Scan() {
txt := s.Text()
// The first field after - is fs type.
// Safe as spaces in mountpoints are encoded as \040
if !strings.Contains(txt, " - selinuxfs ") {
continue
}
const mPos = 5 // mount point is 5th field
fields := strings.SplitN(txt, " ", mPos+1)
if len(fields) < mPos+1 {
continue
}
return fields[mPos-1]
}
return ""
}
func (s *selinuxState) getSELinuxfs() string {
s.Lock()
selinuxfs := s.selinuxfs
@@ -100,40 +188,7 @@ func (s *selinuxState) getSELinuxfs() string {
return selinuxfs
}
selinuxfs = ""
f, err := os.Open("/proc/self/mountinfo")
if err != nil {
return selinuxfs
}
defer f.Close()
scanner := bufio.NewScanner(f)
for scanner.Scan() {
txt := scanner.Text()
// Safe as mountinfo encodes mountpoints with spaces as \040.
sepIdx := strings.Index(txt, " - ")
if sepIdx == -1 {
continue
}
if !strings.Contains(txt[sepIdx:], "selinuxfs") {
continue
}
fields := strings.Split(txt, " ")
if len(fields) < 5 {
continue
}
selinuxfs = fields[4]
break
}
if selinuxfs != "" {
var buf syscall.Statfs_t
syscall.Statfs(selinuxfs, &buf)
if (buf.Flags & stRdOnly) == 1 {
selinuxfs = ""
}
}
return s.setSELinuxfs(selinuxfs)
return s.setSELinuxfs(findSELinuxfs())
}
// getSelinuxMountPoint returns the path to the mountpoint of an selinuxfs
@@ -150,7 +205,7 @@ func GetEnabled() bool {
return state.getEnabled()
}
func readConfig(target string) (value string) {
func readConfig(target string) string {
var (
val, key string
bufin *bufio.Reader
@@ -192,30 +247,42 @@ func readConfig(target string) (value string) {
}
func getSELinuxPolicyRoot() string {
return selinuxDir + readConfig(selinuxTypeTag)
return filepath.Join(selinuxDir, readConfig(selinuxTypeTag))
}
func readCon(name string) (string, error) {
var val string
func readCon(fpath string) (string, error) {
if fpath == "" {
return "", ErrEmptyPath
}
in, err := os.Open(name)
in, err := os.Open(fpath)
if err != nil {
return "", err
}
defer in.Close()
_, err = fmt.Fscanf(in, "%s", &val)
return val, err
var retval string
if _, err := fmt.Fscanf(in, "%s", &retval); err != nil {
return "", err
}
return strings.Trim(retval, "\x00"), nil
}
// SetFileLabel sets the SELinux label for this path or returns an error.
func SetFileLabel(path string, label string) error {
return lsetxattr(path, xattrNameSelinux, []byte(label), 0)
func SetFileLabel(fpath string, label string) error {
if fpath == "" {
return ErrEmptyPath
}
return lsetxattr(fpath, xattrNameSelinux, []byte(label), 0)
}
// FileLabel returns the SELinux label for this path or returns an error.
func FileLabel(path string) (string, error) {
label, err := lgetxattr(path, xattrNameSelinux)
func FileLabel(fpath string) (string, error) {
if fpath == "" {
return "", ErrEmptyPath
}
label, err := lgetxattr(fpath, xattrNameSelinux)
if err != nil {
return "", err
}
@@ -260,8 +327,12 @@ func ExecLabel() (string, error) {
return readCon(fmt.Sprintf("/proc/self/task/%d/attr/exec", syscall.Gettid()))
}
func writeCon(name string, val string) error {
out, err := os.OpenFile(name, os.O_WRONLY, 0)
func writeCon(fpath string, val string) error {
if fpath == "" {
return ErrEmptyPath
}
out, err := os.OpenFile(fpath, os.O_WRONLY, 0)
if err != nil {
return err
}
@@ -275,6 +346,37 @@ func writeCon(name string, val string) error {
return err
}
/*
CanonicalizeContext takes a context string and writes it to the kernel;
the function then returns the context that the kernel will use. This function
can be used to see if two contexts are equivalent.
*/
func CanonicalizeContext(val string) (string, error) {
return readWriteCon(filepath.Join(getSelinuxMountPoint(), "context"), val)
}
func readWriteCon(fpath string, val string) (string, error) {
if fpath == "" {
return "", ErrEmptyPath
}
f, err := os.OpenFile(fpath, os.O_RDWR, 0)
if err != nil {
return "", err
}
defer f.Close()
_, err = f.Write([]byte(val))
if err != nil {
return "", err
}
var retval string
if _, err := fmt.Fscanf(f, "%s", &retval); err != nil {
return "", err
}
return strings.Trim(retval, "\x00"), nil
}
/*
SetExecLabel sets the SELinux label that the kernel will use for any programs
that are executed by the current process thread, or an error.
@@ -285,7 +387,10 @@ func SetExecLabel(label string) error {
// Get returns the Context as a string
func (c Context) Get() string {
return fmt.Sprintf("%s:%s:%s:%s", c["user"], c["role"], c["type"], c["level"])
if c["level"] != "" {
return fmt.Sprintf("%s:%s:%s:%s", c["user"], c["role"], c["type"], c["level"])
}
return fmt.Sprintf("%s:%s:%s", c["user"], c["role"], c["type"])
}
// NewContext creates a new Context struct from the specified label
@@ -297,7 +402,9 @@ func NewContext(label string) Context {
c["user"] = con[0]
c["role"] = con[1]
c["type"] = con[2]
c["level"] = con[3]
if len(con) > 3 {
c["level"] = con[3]
}
}
return c
}
@@ -306,12 +413,14 @@ func NewContext(label string) Context {
func ReserveLabel(label string) {
if len(label) != 0 {
con := strings.SplitN(label, ":", 4)
mcsAdd(con[3])
if len(con) > 3 {
mcsAdd(con[3])
}
}
}
func selinuxEnforcePath() string {
return fmt.Sprintf("%s/enforce", selinuxPath)
return fmt.Sprintf("%s/enforce", getSelinuxMountPoint())
}
// EnforceMode returns the current SELinux mode Enforcing, Permissive, Disabled
@@ -354,16 +463,22 @@ func DefaultEnforceMode() int {
}
func mcsAdd(mcs string) error {
if mcs == "" {
return nil
}
state.Lock()
defer state.Unlock()
if state.mcsList[mcs] {
return fmt.Errorf("MCS Label already exists")
return ErrMCSAlreadyExists
}
state.mcsList[mcs] = true
return nil
}
func mcsDelete(mcs string) {
if mcs == "" {
return
}
state.Lock()
defer state.Unlock()
state.mcsList[mcs] = false
@@ -424,14 +539,14 @@ Allowing it to be used by another process.
func ReleaseLabel(label string) {
if len(label) != 0 {
con := strings.SplitN(label, ":", 4)
mcsDelete(con[3])
if len(con) > 3 {
mcsDelete(con[3])
}
}
}
var roFileLabel string
// ROFileLabel returns the specified SELinux readonly file label
func ROFileLabel() (fileLabel string) {
func ROFileLabel() string {
return roFileLabel
}
@@ -497,23 +612,25 @@ func ContainerLabels() (processLabel string, fileLabel string) {
roFileLabel = fileLabel
}
exit:
mcs := uniqMcs(1024)
scon := NewContext(processLabel)
scon["level"] = mcs
processLabel = scon.Get()
scon = NewContext(fileLabel)
scon["level"] = mcs
fileLabel = scon.Get()
if scon["level"] != "" {
mcs := uniqMcs(1024)
scon["level"] = mcs
processLabel = scon.Get()
scon = NewContext(fileLabel)
scon["level"] = mcs
fileLabel = scon.Get()
}
return processLabel, fileLabel
}
// SecurityCheckContext validates that the SELinux label is understood by the kernel
func SecurityCheckContext(val string) error {
return writeCon(fmt.Sprintf("%s.context", selinuxPath), val)
return writeCon(fmt.Sprintf("%s/context", getSelinuxMountPoint()), val)
}
/*
CopyLevel returns a label with the MLS/MCS level from src label replaces on
CopyLevel returns a label with the MLS/MCS level from src label replaced on
the dest label.
*/
func CopyLevel(src, dest string) (string, error) {
@@ -536,20 +653,26 @@ func CopyLevel(src, dest string) (string, error) {
// Prevent users from relabeling system files
func badPrefix(fpath string) error {
var badprefixes = []string{"/usr"}
if fpath == "" {
return ErrEmptyPath
}
for _, prefix := range badprefixes {
if fpath == prefix || strings.HasPrefix(fpath, fmt.Sprintf("%s/", prefix)) {
badPrefixes := []string{"/usr"}
for _, prefix := range badPrefixes {
if strings.HasPrefix(fpath, prefix) {
return fmt.Errorf("relabeling content in %s is not allowed", prefix)
}
}
return nil
}
// Chcon changes the fpath file object to the SELinux label label.
// If the fpath is a directory and recurse is true Chcon will walk the
// directory tree setting the label
// Chcon changes the `fpath` file object to the SELinux label `label`.
// If `fpath` is a directory and `recurse` is true, Chcon will walk the
// directory tree setting the label.
func Chcon(fpath string, label string, recurse bool) error {
if fpath == "" {
return ErrEmptyPath
}
if label == "" {
return nil
}
@@ -568,7 +691,7 @@ func Chcon(fpath string, label string, recurse bool) error {
}
// DupSecOpt takes an SELinux process label and returns security options that
// can will set the SELinux Type and Level for future container processes
// can be used to set the SELinux Type and Level for future container processes.
func DupSecOpt(src string) []string {
if src == "" {
return nil
@@ -576,18 +699,23 @@ func DupSecOpt(src string) []string {
con := NewContext(src)
if con["user"] == "" ||
con["role"] == "" ||
con["type"] == "" ||
con["level"] == "" {
con["type"] == "" {
return nil
}
return []string{"user:" + con["user"],
dup := []string{"user:" + con["user"],
"role:" + con["role"],
"type:" + con["type"],
"level:" + con["level"]}
}
if con["level"] != "" {
dup = append(dup, "level:"+con["level"])
}
return dup
}
// DisableSecOpt returns a security opt that can be used to disabling SELinux
// labeling support for future container processes
// DisableSecOpt returns a security opt that can be used to disable SELinux
// labeling support for future container processes.
func DisableSecOpt() []string {
return []string{"disable"}
}

View File

@@ -0,0 +1,188 @@
// +build !selinux
package selinux
import (
"errors"
)
const (
// Enforcing constant indicate SELinux is in enforcing mode
Enforcing = 1
// Permissive constant to indicate SELinux is in permissive mode
Permissive = 0
// Disabled constant to indicate SELinux is disabled
Disabled = -1
)
var (
// ErrMCSAlreadyExists is returned when trying to allocate a duplicate MCS.
ErrMCSAlreadyExists = errors.New("MCS label already exists")
// ErrEmptyPath is returned when an empty path has been specified.
ErrEmptyPath = errors.New("empty path")
)
// Context is a representation of the SELinux label broken into 4 parts
type Context map[string]string
// SetDisabled disables selinux support for the package
func SetDisabled() {
return
}
// GetEnabled returns whether selinux is currently enabled.
func GetEnabled() bool {
return false
}
// SetFileLabel sets the SELinux label for this path or returns an error.
func SetFileLabel(fpath string, label string) error {
return nil
}
// FileLabel returns the SELinux label for this path or returns an error.
func FileLabel(fpath string) (string, error) {
return "", nil
}
/*
SetFSCreateLabel tells the kernel the label to use for all file system objects
created by this task. Setting label="" returns to the default.
*/
func SetFSCreateLabel(label string) error {
return nil
}
/*
FSCreateLabel returns the default label which the kernel is using
for file system objects created by this task. "" indicates the default.
*/
func FSCreateLabel() (string, error) {
return "", nil
}
// CurrentLabel returns the SELinux label of the current process thread, or an error.
func CurrentLabel() (string, error) {
return "", nil
}
// PidLabel returns the SELinux label of the given pid, or an error.
func PidLabel(pid int) (string, error) {
return "", nil
}
/*
ExecLabel returns the SELinux label that the kernel will use for any programs
that are executed by the current process thread, or an error.
*/
func ExecLabel() (string, error) {
return "", nil
}
/*
CanonicalizeContext takes a context string and writes it to the kernel;
the function then returns the context that the kernel will use. This function
can be used to see if two contexts are equivalent.
*/
func CanonicalizeContext(val string) (string, error) {
return "", nil
}
/*
SetExecLabel sets the SELinux label that the kernel will use for any programs
that are executed by the current process thread, or an error.
*/
func SetExecLabel(label string) error {
return nil
}
// Get returns the Context as a string
func (c Context) Get() string {
return ""
}
// NewContext creates a new Context struct from the specified label
func NewContext(label string) Context {
c := make(Context)
return c
}
// ReserveLabel reserves the MLS/MCS level component of the specified label
func ReserveLabel(label string) {
return
}
// EnforceMode returns the current SELinux mode Enforcing, Permissive, Disabled
func EnforceMode() int {
return Disabled
}
/*
SetEnforceMode sets the current SELinux mode Enforcing, Permissive.
Disabled is not valid, since this needs to be set at boot time.
*/
func SetEnforceMode(mode int) error {
return nil
}
/*
DefaultEnforceMode returns the system's default SELinux mode Enforcing,
Permissive or Disabled. Note this is just the default at boot time.
EnforceMode tells you the system's current mode.
*/
func DefaultEnforceMode() int {
return Disabled
}
/*
ReleaseLabel will unreserve the MLS/MCS Level field of the specified label,
allowing it to be used by another process.
*/
func ReleaseLabel(label string) {
return
}
// ROFileLabel returns the specified SELinux readonly file label
func ROFileLabel() string {
return ""
}
/*
ContainerLabels returns an allocated processLabel and fileLabel to be used for
container labeling by the calling process.
*/
func ContainerLabels() (processLabel string, fileLabel string) {
return "", ""
}
// SecurityCheckContext validates that the SELinux label is understood by the kernel
func SecurityCheckContext(val string) error {
return nil
}
/*
CopyLevel returns a label with the MLS/MCS level from src label replaced on
the dest label.
*/
func CopyLevel(src, dest string) (string, error) {
return "", nil
}
// Chcon changes the `fpath` file object to the SELinux label `label`.
// If `fpath` is a directory and `recurse` is true, Chcon will walk the
// directory tree setting the label.
func Chcon(fpath string, label string, recurse bool) error {
return nil
}
// DupSecOpt takes an SELinux process label and returns security options that
// can be used to set the SELinux Type and Level for future container processes.
func DupSecOpt(src string) []string {
return nil
}
// DisableSecOpt returns a security opt that can be used to disable SELinux
// labeling support for future container processes.
func DisableSecOpt() []string {
return []string{"disable"}
}

View File

@@ -1,4 +1,4 @@
// +build linux
// +build selinux,linux
package selinux

View File

@@ -1,6 +1,8 @@
# go-patricia #
**Documentation**: [GoDoc](http://godoc.org/github.com/tchap/go-patricia/patricia)<br />
**Build Status**: [![Build
Status](https://drone.io/github.com/tchap/go-patricia/status.png)](https://drone.io/github.com/tchap/go-patricia/latest)<br />
**Test Coverage**: [![Coverage
Status](https://coveralls.io/repos/tchap/go-patricia/badge.png)](https://coveralls.io/r/tchap/go-patricia)
@@ -115,3 +117,7 @@ MIT, check the `LICENSE` file.
[![Gittip
Badge](http://img.shields.io/gittip/alanhamlett.png)](https://www.gittip.com/tchap/
"Gittip Badge")
[![Bitdeli
Badge](https://d2weczhvl823v0.cloudfront.net/tchap/go-patricia/trend.png)](https://bitdeli.com/free
"Bitdeli Badge")