commit b55c9c6c35 (parent 6ba92a7921)
Author: Lantao Liu
Date: 2019-01-02 10:50:25 -08:00
Signed-off-by: Lantao Liu <lantaol@google.com>
105 changed files with 6095 additions and 5218 deletions

View File

@@ -1,7 +1,7 @@
# cri
<p align="center">
<img src="https://kubernetes.io/images/favicon.png" width="50" height="50">
<img src="https://containerd.io/img/containerd-dark.png" width="200" >
<img src="https://containerd.io/img/logos/icon/black/containerd-icon-black.png" width="50" >
</p>
*Note: The standalone `cri-containerd` binary is end-of-life. `cri-containerd` is
@@ -36,6 +36,7 @@ See [test dashboard](https://k8s-testgrid.appspot.com/sig-node-containerd)
| v1.0.0-alpha.x | | 1.7, 1.8 | v1alpha1 |
| v1.0.0-beta.x | | 1.9 | v1alpha1 |
| End-Of-Life | v1.1 | 1.10+ | v1alpha2 |
| | v1.2 | 1.10+ | v1alpha2 |
| | HEAD | 1.10+ | v1alpha2 |
## Production Quality Cluster on GCE
@@ -149,7 +150,8 @@ implementation.
For sync communication we have a community slack with a #containerd channel that
everyone is welcome to join and chat about development.
**Slack:** https://dockr.ly/community
**Slack:** Catch us in the #containerd and #containerd-dev channels on dockercommunity.slack.com.
[Click here for an invite to docker community slack.](https://join.slack.com/t/dockercommunity/shared_invite/enQtNDY4MDc1Mzc0MzIwLTgxZDBlMmM4ZGEyNDc1N2FkMzlhODJkYmE1YTVkYjM1MDE3ZjAwZjBkOGFlOTJkZjRmZGYzNjYyY2M3ZTUxYzQ)
## Other Communications
As this project is tightly coupled to CRI and CRI-Tools and they are Kubernetes

View File

@@ -36,6 +36,7 @@ import (
imagespec "github.com/opencontainers/image-spec/specs-go/v1"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
"k8s.io/klog"
criconfig "github.com/containerd/cri/pkg/config"
"github.com/containerd/cri/pkg/constants"
@@ -175,16 +176,18 @@ func getServicesOpts(ic *plugin.InitContext) ([]containerd.ServicesOpt, error) {
// Set glog level.
func setGLogLevel() error {
l := logrus.GetLevel()
if err := flag.Set("logtostderr", "true"); err != nil {
fs := flag.NewFlagSet("klog", flag.PanicOnError)
klog.InitFlags(fs)
if err := fs.Set("logtostderr", "true"); err != nil {
return err
}
switch l {
case log.TraceLevel:
return flag.Set("v", "5")
return fs.Set("v", "5")
case logrus.DebugLevel:
return flag.Set("v", "4")
return fs.Set("v", "4")
case logrus.InfoLevel:
return flag.Set("v", "2")
return fs.Set("v", "2")
// glog doesn't support following filters. Defaults to v=0.
case logrus.WarnLevel:
case logrus.ErrorLevel:
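
The hunk above stops mutating the process-wide flag set and instead hands a private `flag.FlagSet` to `klog.InitFlags`, so the CRI plugin can tune klog verbosity without clobbering flags registered by anything else in the daemon. Below is a minimal, standalone sketch of the same pattern; it is illustrative only, and omits the plugin's own Trace level (which comes from its internal `log` package, not shown here).

```go
package main

import (
	"flag"

	"github.com/sirupsen/logrus"
	"k8s.io/klog"
)

// syncKlogLevel mirrors the setGLogLevel approach above: klog flags live on
// a private FlagSet, so the global flag.CommandLine is left untouched.
func syncKlogLevel(l logrus.Level) error {
	fs := flag.NewFlagSet("klog", flag.PanicOnError)
	klog.InitFlags(fs)
	if err := fs.Set("logtostderr", "true"); err != nil {
		return err
	}
	switch l {
	case logrus.DebugLevel:
		return fs.Set("v", "4")
	case logrus.InfoLevel:
		return fs.Set("v", "2")
	default:
		// Warn/Error and below map to the klog default of v=0.
		return nil
	}
}

func main() {
	if err := syncKlogLevel(logrus.GetLevel()); err != nil {
		logrus.WithError(err).Fatal("failed to set klog level")
	}
}
```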

vendor/github.com/containerd/cri/pkg/netns/netns.go (generated, vendored, new file, 220 lines)
View File

@@ -0,0 +1,220 @@
/*
Copyright 2018 The Containerd Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Copyright 2018 CNI authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package netns
import (
"crypto/rand"
"fmt"
"os"
"path"
"runtime"
"sync"
cnins "github.com/containernetworking/plugins/pkg/ns"
"github.com/docker/docker/pkg/symlink"
"github.com/pkg/errors"
"golang.org/x/sys/unix"
osinterface "github.com/containerd/cri/pkg/os"
)
const nsRunDir = "/var/run/netns"
// Some of the following functions are migrated from
// https://github.com/containernetworking/plugins/blob/master/pkg/testutils/netns_linux.go
// newNS creates a new persistent (bind-mounted) network namespace and returns the
// path to the network namespace.
func newNS() (nsPath string, err error) {
b := make([]byte, 16)
if _, err := rand.Reader.Read(b); err != nil {
return "", errors.Wrap(err, "failed to generate random netns name")
}
// Create the directory for mounting network namespaces
// This needs to be a shared mountpoint in case it is mounted in to
// other namespaces (containers)
if err := os.MkdirAll(nsRunDir, 0755); err != nil {
return "", err
}
// create an empty file at the mount point
nsName := fmt.Sprintf("cni-%x-%x-%x-%x-%x", b[0:4], b[4:6], b[6:8], b[8:10], b[10:])
nsPath = path.Join(nsRunDir, nsName)
mountPointFd, err := os.Create(nsPath)
if err != nil {
return "", err
}
mountPointFd.Close()
defer func() {
// Ensure the mount point is cleaned up on errors
if err != nil {
os.RemoveAll(nsPath) // nolint: errcheck
}
}()
var wg sync.WaitGroup
wg.Add(1)
// do namespace work in a dedicated goroutine, so that we can safely
// Lock/Unlock OSThread without upsetting the lock/unlock state of
// the caller of this function
go (func() {
defer wg.Done()
runtime.LockOSThread()
// Don't unlock. By not unlocking, golang will kill the OS thread when the
// goroutine is done (for go1.10+)
var origNS cnins.NetNS
origNS, err = cnins.GetNS(getCurrentThreadNetNSPath())
if err != nil {
return
}
defer origNS.Close()
// create a new netns on the current thread
err = unix.Unshare(unix.CLONE_NEWNET)
if err != nil {
return
}
// Put this thread back to the orig ns, since it might get reused (pre go1.10)
defer origNS.Set() // nolint: errcheck
// bind mount the netns from the current thread (from /proc) onto the
// mount point. This causes the namespace to persist, even when there
// are no threads in the ns.
err = unix.Mount(getCurrentThreadNetNSPath(), nsPath, "none", unix.MS_BIND, "")
if err != nil {
err = errors.Wrapf(err, "failed to bind mount ns at %s", nsPath)
}
})()
wg.Wait()
if err != nil {
return "", errors.Wrap(err, "failed to create namespace")
}
return nsPath, nil
}
// unmountNS unmounts the NS held by the netns object. unmountNS is idempotent.
func unmountNS(path string) error {
if _, err := os.Stat(path); err != nil {
if os.IsNotExist(err) {
return nil
}
return errors.Wrap(err, "failed to stat netns")
}
path, err := symlink.FollowSymlinkInScope(path, "/")
if err != nil {
return errors.Wrap(err, "failed to follow symlink")
}
if err := osinterface.Unmount(path); err != nil && !os.IsNotExist(err) {
return errors.Wrap(err, "failed to umount netns")
}
if err := os.RemoveAll(path); err != nil {
return errors.Wrap(err, "failed to remove netns")
}
return nil
}
// getCurrentThreadNetNSPath copied from pkg/ns
func getCurrentThreadNetNSPath() string {
// /proc/self/ns/net returns the namespace of the main thread, not
// of whatever thread this goroutine is running on. Make sure we
// use the thread's net namespace since the thread is switching around
return fmt.Sprintf("/proc/%d/task/%d/ns/net", os.Getpid(), unix.Gettid())
}
// NetNS holds network namespace.
type NetNS struct {
path string
}
// NewNetNS creates a network namespace.
func NewNetNS() (*NetNS, error) {
path, err := newNS()
if err != nil {
return nil, errors.Wrap(err, "failed to setup netns")
}
return &NetNS{path: path}, nil
}
// LoadNetNS loads existing network namespace.
func LoadNetNS(path string) *NetNS {
return &NetNS{path: path}
}
// Remove removes network namespace. Remove is idempotent, meaning it might
// be invoked multiple times and provides consistent result.
func (n *NetNS) Remove() error {
return unmountNS(n.path)
}
// Closed checks whether the network namespace has been closed.
func (n *NetNS) Closed() (bool, error) {
ns, err := cnins.GetNS(n.path)
if err != nil {
if _, ok := err.(cnins.NSPathNotExistErr); ok {
// The network namespace has already been removed.
return true, nil
}
if _, ok := err.(cnins.NSPathNotNSErr); ok {
// The network namespace is not mounted, remove it.
if err := os.RemoveAll(n.path); err != nil {
return false, errors.Wrap(err, "remove netns")
}
return true, nil
}
return false, errors.Wrap(err, "get netns fd")
}
if err := ns.Close(); err != nil {
return false, errors.Wrap(err, "close netns fd")
}
return false, nil
}
// GetPath returns network namespace path for sandbox container
func (n *NetNS) GetPath() string {
return n.path
}
// Do runs a function in the network namespace.
func (n *NetNS) Do(f func(cnins.NetNS) error) error {
ns, err := cnins.GetNS(n.path)
if err != nil {
return errors.Wrap(err, "get netns fd")
}
defer ns.Close() // nolint: errcheck
return ns.Do(f)
}
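
The new `netns` package above gives sandboxes a small lifecycle: create a persistent, bind-mounted namespace, hand its path to CNI, run code inside it, and remove it when the sandbox goes away. A hypothetical usage sketch (error handling trimmed; `cnins` is the containernetworking `ns` package imported by the file above):

```go
package main

import (
	"log"

	"github.com/containerd/cri/pkg/netns"
	cnins "github.com/containernetworking/plugins/pkg/ns"
)

func main() {
	// Create a bind-mounted namespace under /var/run/netns.
	n, err := netns.NewNetNS()
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("netns path for CNI: %s", n.GetPath())

	// Run a function with the current thread switched into the namespace.
	if err := n.Do(func(cnins.NetNS) error {
		// e.g. inspect interfaces or query the pod IP here.
		return nil
	}); err != nil {
		log.Fatal(err)
	}

	// Remove is idempotent: it unmounts and deletes the bind mount.
	if err := n.Remove(); err != nil {
		log.Fatal(err)
	}
}
```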

View File

@@ -187,6 +187,7 @@ func (c *criService) CreateContainer(ctx context.Context, r *runtime.CreateConta
opts = append(opts, customopts.WithVolumes(mountMap))
}
meta.ImageRef = image.ID
meta.StopSignal = image.ImageSpec.Config.StopSignal
// Get container log path.
if config.GetLogPath() != "" {

View File

@@ -24,6 +24,7 @@ import (
"golang.org/x/net/context"
runtime "k8s.io/kubernetes/pkg/kubelet/apis/cri/runtime/v1alpha2"
"github.com/containerd/cri/pkg/store"
containerstore "github.com/containerd/cri/pkg/store/container"
)
@@ -43,17 +44,20 @@ func (c *criService) ContainerStatus(ctx context.Context, r *runtime.ContainerSt
imageRef := container.ImageRef
image, err := c.imageStore.Get(imageRef)
if err != nil {
return nil, errors.Wrapf(err, "failed to get image %q", imageRef)
}
repoTags, repoDigests := parseImageReferences(image.References)
if len(repoTags) > 0 {
// Based on current behavior of dockershim, this field should be
// image tag.
spec = &runtime.ImageSpec{Image: repoTags[0]}
}
if len(repoDigests) > 0 {
// Based on the CRI definition, this field will be consumed by user.
imageRef = repoDigests[0]
if err != store.ErrNotExist {
return nil, errors.Wrapf(err, "failed to get image %q", imageRef)
}
} else {
repoTags, repoDigests := parseImageReferences(image.References)
if len(repoTags) > 0 {
// Based on current behavior of dockershim, this field should be
// image tag.
spec = &runtime.ImageSpec{Image: repoTags[0]}
}
if len(repoDigests) > 0 {
// Based on the CRI definition, this field will be consumed by user.
imageRef = repoDigests[0]
}
}
status := toCRIContainerStatus(container, spec, imageRef)
info, err := toCRIContainerInfo(ctx, container, r.GetVerbose())

View File

@@ -19,7 +19,6 @@ package server
import (
"time"
"github.com/containerd/containerd"
"github.com/containerd/containerd/errdefs"
"github.com/docker/docker/pkg/signal"
"github.com/pkg/errors"
@@ -28,6 +27,7 @@ import (
"golang.org/x/sys/unix"
runtime "k8s.io/kubernetes/pkg/kubelet/apis/cri/runtime/v1alpha2"
"github.com/containerd/cri/pkg/store"
containerstore "github.com/containerd/cri/pkg/store/container"
)
@@ -77,24 +77,36 @@ func (c *criService) stopContainer(ctx context.Context, container containerstore
// We only need to kill the task. The event handler will Delete the
// task from containerd after it handles the Exited event.
if timeout > 0 {
stopSignal := unix.SIGTERM
image, err := c.imageStore.Get(container.ImageRef)
if err != nil {
// NOTE(random-liu): It's possible that the container is stopped,
// deleted and image is garbage collected before this point. However,
// the chance is really slim, even it happens, it's still fine to return
// an error here.
return errors.Wrapf(err, "failed to get image metadata %q", container.ImageRef)
}
if image.ImageSpec.Config.StopSignal != "" {
stopSignal, err = signal.ParseSignal(image.ImageSpec.Config.StopSignal)
stopSignal := "SIGTERM"
if container.StopSignal != "" {
stopSignal = container.StopSignal
} else {
// The image may have been deleted, and the `StopSignal` field is
// just introduced to handle that.
// However, for containers created before the `StopSignal` field is
// introduced, still try to get the stop signal from the image config.
// If the image has been deleted, logging an error and using the
// default SIGTERM is still better than returning error and leaving
// the container unstoppable. (See issue #990)
// TODO(random-liu): Remove this logic when containerd 1.2 is deprecated.
image, err := c.imageStore.Get(container.ImageRef)
if err != nil {
return errors.Wrapf(err, "failed to parse stop signal %q",
image.ImageSpec.Config.StopSignal)
if err != store.ErrNotExist {
return errors.Wrapf(err, "failed to get image %q", container.ImageRef)
}
logrus.Warningf("Image %q not found, stop container with signal %q", container.ImageRef, stopSignal)
} else {
if image.ImageSpec.Config.StopSignal != "" {
stopSignal = image.ImageSpec.Config.StopSignal
}
}
}
logrus.Infof("Stop container %q with signal %v", id, stopSignal)
if err = task.Kill(ctx, stopSignal); err != nil && !errdefs.IsNotFound(err) {
sig, err := signal.ParseSignal(stopSignal)
if err != nil {
return errors.Wrapf(err, "failed to parse stop signal %q", stopSignal)
}
logrus.Infof("Stop container %q with signal %v", id, sig)
if err = task.Kill(ctx, sig); err != nil && !errdefs.IsNotFound(err) {
return errors.Wrapf(err, "failed to stop container %q", id)
}
@@ -105,7 +117,7 @@ func (c *criService) stopContainer(ctx context.Context, container containerstore
}
logrus.Infof("Kill container %q", id)
if err = task.Kill(ctx, unix.SIGKILL, containerd.WithKillAll); err != nil && !errdefs.IsNotFound(err) {
if err = task.Kill(ctx, unix.SIGKILL); err != nil && !errdefs.IsNotFound(err) {
return errors.Wrapf(err, "failed to kill container %q", id)
}
@@ -113,28 +125,7 @@ func (c *criService) stopContainer(ctx context.Context, container containerstore
if err = c.waitContainerStop(ctx, container, killContainerTimeout); err == nil {
return nil
}
logrus.WithError(err).Errorf("An error occurs during waiting for container %q to be killed", id)
// This is a fix for `runc`, and should not break other runtimes. With
// containerd.WithKillAll, `runc` will get all processes from the container
// cgroups, and kill them. However, sometimes the processes may be moved
// out from the container cgroup, e.g. users manually move them by mistake,
// or systemd.Delegate=true is not set.
// In these cases, we should try our best to do cleanup, kill the container
// without containerd.WithKillAll, so that runc can kill the container init
// process directly.
// NOTE(random-liu): If pid namespace is shared inside the pod, non-init processes
// of this container will be left running until the pause container is stopped.
logrus.Infof("Kill container %q init process", id)
if err = task.Kill(ctx, unix.SIGKILL); err != nil && !errdefs.IsNotFound(err) {
return errors.Wrapf(err, "failed to kill container %q init process", id)
}
// Wait for a fixed timeout until container stop is observed by event monitor.
if err = c.waitContainerStop(ctx, container, killContainerTimeout); err == nil {
return nil
}
return errors.Wrapf(err, "an error occurs during waiting for container %q init process to be killed", id)
return errors.Wrapf(err, "an error occurs during waiting for container %q to be killed", id)
}
// waitContainerStop waits for container to be stopped until timeout exceeds or context is cancelled.
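
The block above resolves the stop signal in three steps: prefer the `StopSignal` cached in container metadata, fall back to the image config for containers created before that field existed, and default to SIGTERM if the image has already been garbage collected (the issue #990 case); only then is the string parsed into a syscall signal. A compact sketch of that resolution order follows; the `metaStopSignal`/`imageStopSignal` parameters are simplified stand-ins, not the store types used above.

```go
package main

import (
	"fmt"

	"github.com/docker/docker/pkg/signal"
)

// resolveStopSignal mirrors the fallback chain in stopContainer:
// container metadata first, then image config, then SIGTERM.
func resolveStopSignal(metaStopSignal, imageStopSignal string, imageFound bool) (string, error) {
	stopSignal := "SIGTERM"
	switch {
	case metaStopSignal != "":
		stopSignal = metaStopSignal
	case imageFound && imageStopSignal != "":
		stopSignal = imageStopSignal
	}
	// Validate the name before handing it to task.Kill.
	if _, err := signal.ParseSignal(stopSignal); err != nil {
		return "", fmt.Errorf("failed to parse stop signal %q: %v", stopSignal, err)
	}
	return stopSignal, nil
}

func main() {
	sig, _ := resolveStopSignal("", "SIGQUIT", true) // image config wins here
	fmt.Println(sig)                                 // SIGQUIT
}
```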

View File

@@ -20,6 +20,7 @@ import (
"sync"
"time"
"github.com/containerd/containerd"
eventtypes "github.com/containerd/containerd/api/events"
containerdio "github.com/containerd/containerd/cio"
"github.com/containerd/containerd/errdefs"
@@ -31,6 +32,7 @@ import (
"golang.org/x/net/context"
"k8s.io/apimachinery/pkg/util/clock"
"github.com/containerd/cri/pkg/constants"
ctrdutil "github.com/containerd/cri/pkg/containerd/util"
"github.com/containerd/cri/pkg/store"
containerstore "github.com/containerd/cri/pkg/store/container"
@@ -82,7 +84,6 @@ type backOffQueue struct {
// Create new event monitor. New event monitor will start subscribing containerd event. All events
// happen after it should be monitored.
func newEventMonitor(c *criService) *eventMonitor {
// event subscribe doesn't need namespace.
ctx, cancel := context.WithCancel(context.Background())
return &eventMonitor{
c: c,
@@ -94,6 +95,8 @@ func newEventMonitor(c *criService) *eventMonitor {
// subscribe starts to subscribe containerd events.
func (em *eventMonitor) subscribe(subscriber events.Subscriber) {
// note: filters are any match, if you want any match but not in namespace foo
// then you have to manually filter namespace foo
filters := []string{
`topic=="/tasks/exit"`,
`topic=="/tasks/oom"`,
@@ -141,6 +144,10 @@ func (em *eventMonitor) start() <-chan error {
select {
case e := <-em.ch:
logrus.Debugf("Received containerd event timestamp - %v, namespace - %q, topic - %q", e.Timestamp, e.Namespace, e.Topic)
if e.Namespace != constants.K8sContainerdNamespace {
logrus.Debugf("Ignoring events in namespace - %q", e.Namespace)
break
}
id, evt, err := convertEvent(e.Event)
if err != nil {
logrus.WithError(err).Errorf("Failed to convert event %+v", e)
@@ -194,14 +201,11 @@ func (em *eventMonitor) handleEvent(any interface{}) error {
defer cancel()
switch any.(type) {
// If containerd-shim exits unexpectedly, there will be no corresponding event.
// However, containerd could not retrieve container state in that case, so it's
// fine to leave out that case for now.
// TODO(random-liu): [P2] Handle containerd-shim exit.
case *eventtypes.TaskExit:
e := any.(*eventtypes.TaskExit)
logrus.Infof("TaskExit event %+v", e)
cntr, err := em.c.containerStore.Get(e.ContainerID)
// Use ID instead of ContainerID to rule out TaskExit event for exec.
cntr, err := em.c.containerStore.Get(e.ID)
if err == nil {
if err := handleContainerExit(ctx, e, cntr); err != nil {
return errors.Wrap(err, "failed to handle container TaskExit event")
@@ -211,7 +215,7 @@ func (em *eventMonitor) handleEvent(any interface{}) error {
return errors.Wrap(err, "can't find container for TaskExit event")
}
// Use GetAll to include sandbox in unknown state.
sb, err := em.c.sandboxStore.GetAll(e.ContainerID)
sb, err := em.c.sandboxStore.GetAll(e.ID)
if err == nil {
if err := handleSandboxExit(ctx, e, sb); err != nil {
return errors.Wrap(err, "failed to handle sandbox TaskExit event")
@@ -224,17 +228,12 @@ func (em *eventMonitor) handleEvent(any interface{}) error {
case *eventtypes.TaskOOM:
e := any.(*eventtypes.TaskOOM)
logrus.Infof("TaskOOM event %+v", e)
// For TaskOOM, we only care which container it belongs to.
cntr, err := em.c.containerStore.Get(e.ContainerID)
if err != nil {
if err != store.ErrNotExist {
return errors.Wrap(err, "can't find container for TaskOOM event")
}
if _, err = em.c.sandboxStore.Get(e.ContainerID); err != nil {
if err != store.ErrNotExist {
return errors.Wrap(err, "can't find sandbox for TaskOOM event")
}
return nil
}
return nil
}
err = cntr.Status.UpdateSync(func(status containerstore.Status) (containerstore.Status, error) {
@@ -263,10 +262,6 @@ func (em *eventMonitor) handleEvent(any interface{}) error {
// handleContainerExit handles TaskExit event for container.
func handleContainerExit(ctx context.Context, e *eventtypes.TaskExit, cntr containerstore.Container) error {
if e.Pid != cntr.Status.Get().Pid {
// Non-init process died, ignore the event.
return nil
}
// Attach container IO so that `Delete` could cleanup the stream properly.
task, err := cntr.Container.Task(ctx,
func(*containerdio.FIFOSet) (containerdio.IO, error) {
@@ -279,7 +274,7 @@ func handleContainerExit(ctx context.Context, e *eventtypes.TaskExit, cntr conta
}
} else {
// TODO(random-liu): [P1] This may block the loop, we may want to spawn a worker
if _, err = task.Delete(ctx); err != nil {
if _, err = task.Delete(ctx, containerd.WithProcessKill); err != nil {
if !errdefs.IsNotFound(err) {
return errors.Wrap(err, "failed to stop container")
}
@@ -307,10 +302,6 @@ func handleContainerExit(ctx context.Context, e *eventtypes.TaskExit, cntr conta
// handleSandboxExit handles TaskExit event for sandbox.
func handleSandboxExit(ctx context.Context, e *eventtypes.TaskExit, sb sandboxstore.Sandbox) error {
if e.Pid != sb.Status.Get().Pid {
// Non-init process died, ignore the event.
return nil
}
// No stream attached to sandbox container.
task, err := sb.Container.Task(ctx, nil)
if err != nil {
@@ -319,7 +310,7 @@ func handleSandboxExit(ctx context.Context, e *eventtypes.TaskExit, sb sandboxst
}
} else {
// TODO(random-liu): [P1] This may block the loop, we may want to spawn a worker
if _, err = task.Delete(ctx); err != nil {
if _, err = task.Delete(ctx, containerd.WithProcessKill); err != nil {
if !errdefs.IsNotFound(err) {
return errors.Wrap(err, "failed to stop sandbox")
}
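
As the comment in `subscribe` notes, the topic filters are OR'd together, so namespace scoping has to be done by hand when events are read off the channel; the handler also switches from `ContainerID` to `ID` so that exec exits are not mistaken for the container's init process exiting. A hedged sketch of that read loop against containerd's subscriber API follows; the `"k8s.io"` namespace constant, socket path, and handler body are placeholders.

```go
package main

import (
	"context"
	"log"

	"github.com/containerd/containerd"
	"github.com/containerd/typeurl"
)

func watchTaskEvents(ctx context.Context, client *containerd.Client) {
	// Topic filters are any-match; the namespace is filtered manually below.
	ch, errs := client.Subscribe(ctx,
		`topic=="/tasks/exit"`,
		`topic=="/tasks/oom"`,
	)
	for {
		select {
		case e := <-ch:
			if e.Namespace != "k8s.io" { // keep only CRI-managed events
				continue
			}
			evt, err := typeurl.UnmarshalAny(e.Event)
			if err != nil {
				log.Printf("failed to decode event: %v", err)
				continue
			}
			log.Printf("event %s: %+v", e.Topic, evt)
		case err := <-errs:
			log.Printf("subscriber error: %v", err)
			return
		case <-ctx.Done():
			return
		}
	}
}

func main() {
	client, err := containerd.New("/run/containerd/containerd.sock")
	if err != nil {
		log.Fatal(err)
	}
	defer client.Close()
	watchTaskEvents(context.Background(), client)
}
```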

View File

@@ -137,9 +137,9 @@ const (
// generated is unique as long as sandbox metadata is unique.
func makeSandboxName(s *runtime.PodSandboxMetadata) string {
return strings.Join([]string{
s.Name, // 0
s.Namespace, // 1
s.Uid, // 2
s.Name, // 0
s.Namespace, // 1
s.Uid, // 2
fmt.Sprintf("%d", s.Attempt), // 3
}, nameDelimiter)
}
@@ -149,10 +149,10 @@ func makeSandboxName(s *runtime.PodSandboxMetadata) string {
// unique.
func makeContainerName(c *runtime.ContainerMetadata, s *runtime.PodSandboxMetadata) string {
return strings.Join([]string{
c.Name, // 0
s.Name, // 1: pod name
s.Namespace, // 2: pod namespace
s.Uid, // 3: pod uid
c.Name, // 0
s.Name, // 1: pod name
s.Namespace, // 2: pod namespace
s.Uid, // 3: pod uid
fmt.Sprintf("%d", c.Attempt), // 4
}, nameDelimiter)
}
@@ -353,7 +353,7 @@ func checkSelinuxLevel(level string) (bool, error) {
matched, err := regexp.MatchString(`^s\d(-s\d)??(:c\d{1,4}((.c\d{1,4})?,c\d{1,4})*(.c\d{1,4})?(,c\d{1,4}(.c\d{1,4})?)*)?$`, level)
if err != nil || !matched {
return false, fmt.Errorf("the format of 'level' %q is not correct: %v", level, err)
return false, errors.Wrapf(err, "the format of 'level' %q is not correct", level)
}
return true, nil
}
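
For context on the naming hunks above: `makeSandboxName` and `makeContainerName` join the CRI metadata fields with the package's `nameDelimiter`, which is assumed here to be an underscore (the constant is not shown in this hunk). A toy version with a hypothetical pod UID, just to show the shape of the generated names:

```go
package main

import (
	"fmt"
	"strings"
)

// A toy version of makeSandboxName: the real one joins the same fields with
// the package's nameDelimiter (assumed to be "_" here).
func makeSandboxName(name, namespace, uid string, attempt uint32) string {
	return strings.Join([]string{
		name,                       // 0
		namespace,                  // 1
		uid,                        // 2
		fmt.Sprintf("%d", attempt), // 3
	}, "_")
}

func main() {
	// e.g. "nginx_default_5e2dbc53-0bb9-11e9-b3d3-42010a800002_0"
	fmt.Println(makeSandboxName("nginx", "default", "5e2dbc53-0bb9-11e9-b3d3-42010a800002", 0))
}
```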

View File

@@ -61,11 +61,11 @@ import (
// if we saw an image without snapshots or with in-complete contents during startup,
// should we re-pull the image? Or should we remove the entry?
//
// yanxuean: We cann't delete image directly, because we don't know if the image
// yanxuean: We can't delete image directly, because we don't know if the image
// is pulled by us. There are resource leakage.
//
// 2) Containerd suggests user to add entry before pulling the image. However if
// an error occurrs during the pulling, should we remove the entry from metadata
// an error occurs during the pulling, should we remove the entry from metadata
// store? Or should we leave it there until next startup (resource leakage)?
//
// 3) The cri plugin only exposes "READY" (successfully pulled and unpacked) images

View File

@@ -299,7 +299,7 @@ func (in *instrumentedService) PullImage(ctx context.Context, r *runtime.PullIma
if err := in.checkInitialized(); err != nil {
return nil, err
}
logrus.Infof("PullImage %q with auth config %+v", r.GetImage().GetImage(), r.GetAuth())
logrus.Infof("PullImage %q", r.GetImage().GetImage())
defer func() {
if err != nil {
logrus.WithError(err).Errorf("PullImage %q failed", r.GetImage().GetImage())

View File

@@ -34,6 +34,7 @@ import (
"golang.org/x/net/context"
runtime "k8s.io/kubernetes/pkg/kubelet/apis/cri/runtime/v1alpha2"
"github.com/containerd/cri/pkg/netns"
cio "github.com/containerd/cri/pkg/server/io"
containerstore "github.com/containerd/cri/pkg/store/container"
sandboxstore "github.com/containerd/cri/pkg/store/sandbox"
@@ -145,7 +146,7 @@ func (c *criService) recover(ctx context.Context) error {
// * ListContainerStats: Not in critical code path, a default timeout will
// be applied at CRI level.
// * Recovery logic: We should set a time for each container/sandbox recovery.
// * Event montior: We should set a timeout for each container/sandbox event handling.
// * Event monitor: We should set a timeout for each container/sandbox event handling.
const loadContainerTimeout = 10 * time.Second
// loadContainer loads container from containerd and status checkpoint.
@@ -394,14 +395,7 @@ func loadSandbox(ctx context.Context, cntr containerd.Container) (sandboxstore.S
// Don't need to load netns for host network sandbox.
return sandbox, nil
}
netNS, err := sandboxstore.LoadNetNS(meta.NetNSPath)
if err != nil {
if err != sandboxstore.ErrClosedNetNS {
return sandbox, errors.Wrapf(err, "failed to load netns %q", meta.NetNSPath)
}
netNS = nil
}
sandbox.NetNS = netNS
sandbox.NetNS = netns.LoadNetNS(meta.NetNSPath)
// It doesn't matter whether task is running or not. If it is running, sandbox
// status will be `READY`; if it is not running, sandbox status will be `NOT_READY`,

View File

@@ -59,10 +59,12 @@ func (c *criService) portForward(id string, port int32, stream io.ReadWriteClose
securityContext := s.Config.GetLinux().GetSecurityContext()
hostNet := securityContext.GetNamespaceOptions().GetNetwork() == runtime.NamespaceMode_NODE
if !hostNet {
if s.NetNS == nil || s.NetNS.Closed() {
if closed, err := s.NetNS.Closed(); err != nil {
return errors.Wrapf(err, "failed to check netwok namespace closed for sandbox %q", id)
} else if closed {
return errors.Errorf("network namespace for sandbox %q is closed", id)
}
netNSDo = s.NetNS.GetNs().Do
netNSDo = s.NetNS.Do
netNSPath = s.NetNS.GetPath()
} else {
// Run the function directly for host network.

View File

@@ -52,8 +52,13 @@ func (c *criService) RemovePodSandbox(ctx context.Context, r *runtime.RemovePodS
}
// Return error if sandbox network namespace is not closed yet.
if sandbox.NetNS != nil && !sandbox.NetNS.Closed() {
return nil, errors.Errorf("sandbox network namespace %q is not fully closed", sandbox.NetNS.GetPath())
if sandbox.NetNS != nil {
nsPath := sandbox.NetNS.GetPath()
if closed, err := sandbox.NetNS.Closed(); err != nil {
return nil, errors.Wrapf(err, "failed to check sandbox network namespace %q closed", nsPath)
} else if !closed {
return nil, errors.Errorf("sandbox network namespace %q is not fully closed", nsPath)
}
}
// Remove all containers inside the sandbox.

View File

@@ -17,6 +17,7 @@ limitations under the License.
package server
import (
"encoding/json"
"fmt"
"os"
"strings"
@@ -40,6 +41,7 @@ import (
customopts "github.com/containerd/cri/pkg/containerd/opts"
ctrdutil "github.com/containerd/cri/pkg/containerd/util"
"github.com/containerd/cri/pkg/log"
"github.com/containerd/cri/pkg/netns"
sandboxstore "github.com/containerd/cri/pkg/store/sandbox"
"github.com/containerd/cri/pkg/util"
)
@@ -88,6 +90,13 @@ func (c *criService) RunPodSandbox(ctx context.Context, r *runtime.RunPodSandbox
if err != nil {
return nil, errors.Wrapf(err, "failed to get sandbox image %q", c.config.SandboxImage)
}
ociRuntime, err := c.getSandboxRuntime(config, r.GetRuntimeHandler())
if err != nil {
return nil, errors.Wrap(err, "failed to get sandbox runtime")
}
logrus.Debugf("Use OCI %+v for sandbox %q", ociRuntime, id)
securityContext := config.GetLinux().GetSecurityContext()
//Create Network Namespace if it is not in host network
hostNet := securityContext.GetNamespaceOptions().GetNetwork() == runtime.NamespaceMode_NODE
@@ -96,7 +105,7 @@ func (c *criService) RunPodSandbox(ctx context.Context, r *runtime.RunPodSandbox
// handle. NetNSPath in sandbox metadata and NetNS is non empty only for non host network
// namespaces. If the pod is in host network namespace then both are empty and should not
// be used.
sandbox.NetNS, err = sandboxstore.NewNetNS()
sandbox.NetNS, err = netns.NewNetNS()
if err != nil {
return nil, errors.Wrapf(err, "failed to create network namespace for sandbox %q", id)
}
@@ -117,7 +126,7 @@ func (c *criService) RunPodSandbox(ctx context.Context, r *runtime.RunPodSandbox
// In this case however caching the IP will add a subtle performance enhancement by avoiding
// calls to network namespace of the pod to query the IP of the veth interface on every
// SandboxStatus request.
sandbox.IP, err = c.setupPod(id, sandbox.NetNSPath, config)
sandbox.IP, sandbox.CNIResult, err = c.setupPod(id, sandbox.NetNSPath, config)
if err != nil {
return nil, errors.Wrapf(err, "failed to setup network for sandbox %q", id)
}
@@ -131,12 +140,6 @@ func (c *criService) RunPodSandbox(ctx context.Context, r *runtime.RunPodSandbox
}()
}
ociRuntime, err := c.getSandboxRuntime(config, r.GetRuntimeHandler())
if err != nil {
return nil, errors.Wrap(err, "failed to get sandbox runtime")
}
logrus.Debugf("Use OCI %+v for sandbox %q", ociRuntime, id)
// Create sandbox container.
spec, err := c.generateSandboxContainerSpec(id, config, &image.ImageSpec.Config, sandbox.NetNSPath)
if err != nil {
@@ -527,9 +530,9 @@ func (c *criService) unmountSandboxFiles(id string, config *runtime.PodSandboxCo
}
// setupPod setups up the network for a pod
func (c *criService) setupPod(id string, path string, config *runtime.PodSandboxConfig) (string, error) {
func (c *criService) setupPod(id string, path string, config *runtime.PodSandboxConfig) (string, *cni.CNIResult, error) {
if c.netPlugin == nil {
return "", errors.New("cni config not intialized")
return "", nil, errors.New("cni config not initialized")
}
labels := getPodCNILabels(id, config)
@@ -538,17 +541,18 @@ func (c *criService) setupPod(id string, path string, config *runtime.PodSandbox
cni.WithLabels(labels),
cni.WithCapabilityPortMap(toCNIPortMappings(config.GetPortMappings())))
if err != nil {
return "", err
return "", nil, err
}
logDebugCNIResult(id, result)
// Check if the default interface has IP config
if configs, ok := result.Interfaces[defaultIfName]; ok && len(configs.IPConfigs) > 0 {
return selectPodIP(configs.IPConfigs), nil
return selectPodIP(configs.IPConfigs), result, nil
}
// If it comes here then the result was invalid so destroy the pod network and return error
if err := c.teardownPod(id, path, config); err != nil {
logrus.WithError(err).Errorf("Failed to destroy network for sandbox %q", id)
}
return "", errors.Errorf("failed to find network info for sandbox %q", id)
return "", result, errors.Errorf("failed to find network info for sandbox %q", id)
}
// toCNIPortMappings converts CRI port mappings to CNI.
@@ -638,3 +642,15 @@ func (c *criService) getSandboxRuntime(config *runtime.PodSandboxConfig, runtime
}
return handler, nil
}
func logDebugCNIResult(sandboxID string, result *cni.CNIResult) {
if logrus.GetLevel() < logrus.DebugLevel {
return
}
cniResult, err := json.Marshal(result)
if err != nil {
logrus.WithError(err).Errorf("Failed to marshal CNI result for sandbox %q: %v", sandboxID, err)
return
}
logrus.Debugf("cni result for sandbox %q: %s", sandboxID, string(cniResult))
}
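
The `setupPod` change above threads the full `*cni.CNIResult` back to the caller so it can be cached on the sandbox and later surfaced via `PodSandboxStatus`, while `logDebugCNIResult` skips the `json.Marshal` unless debug logging is enabled. A stripped-down sketch of that flow using go-cni is shown below; `"eth0"` stands in for `defaultIfName`, the label map is a placeholder, and loading the host CNI network config is omitted.

```go
package main

import (
	"encoding/json"
	"fmt"
	"log"

	cni "github.com/containerd/go-cni"
)

// setupPodNetwork is a simplified sketch of setupPod: attach the pod netns
// via CNI, pull the IP off the default interface, and keep the full
// CNIResult around for status/debugging.
func setupPodNetwork(netPlugin cni.CNI, id, nsPath string, labels map[string]string) (string, *cni.CNIResult, error) {
	result, err := netPlugin.Setup(id, nsPath, cni.WithLabels(labels))
	if err != nil {
		return "", nil, err
	}
	if raw, err := json.Marshal(result); err == nil {
		log.Printf("cni result for sandbox %q: %s", id, raw)
	}
	// "eth0" stands in for defaultIfName from the real file.
	if cfg, ok := result.Interfaces["eth0"]; ok && len(cfg.IPConfigs) > 0 {
		return cfg.IPConfigs[0].IP.String(), result, nil
	}
	return "", result, fmt.Errorf("no IP found for sandbox %q", id)
}

func main() {
	// Assumes the host CNI network config has already been loaded.
	netPlugin, err := cni.New()
	if err != nil {
		log.Fatal(err)
	}
	ip, _, err := setupPodNetwork(netPlugin, "sandbox-id", "/var/run/netns/cni-example", map[string]string{"K8S_POD_NAME": "nginx"})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("pod IP:", ip)
}
```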

View File

@@ -21,6 +21,7 @@ import (
"github.com/containerd/containerd"
"github.com/containerd/containerd/errdefs"
cni "github.com/containerd/go-cni"
runtimespec "github.com/opencontainers/runtime-spec/specs-go"
"github.com/pkg/errors"
"golang.org/x/net/context"
@@ -36,7 +37,10 @@ func (c *criService) PodSandboxStatus(ctx context.Context, r *runtime.PodSandbox
return nil, errors.Wrap(err, "an error occurred when try to find sandbox")
}
ip := c.getIP(sandbox)
ip, err := c.getIP(sandbox)
if err != nil {
return nil, errors.Wrap(err, "failed to get sandbox ip")
}
status := toCRISandboxStatus(sandbox.Metadata, sandbox.Status.Get(), ip)
if !r.GetVerbose() {
return &runtime.PodSandboxStatusResponse{Status: status}, nil
@@ -54,21 +58,22 @@ func (c *criService) PodSandboxStatus(ctx context.Context, r *runtime.PodSandbox
}, nil
}
func (c *criService) getIP(sandbox sandboxstore.Sandbox) string {
func (c *criService) getIP(sandbox sandboxstore.Sandbox) (string, error) {
config := sandbox.Config
if config.GetLinux().GetSecurityContext().GetNamespaceOptions().GetNetwork() == runtime.NamespaceMode_NODE {
// For sandboxes using the node network we are not
// responsible for reporting the IP.
return ""
return "", nil
}
// The network namespace has been closed.
if sandbox.NetNS == nil || sandbox.NetNS.Closed() {
return ""
if closed, err := sandbox.NetNS.Closed(); err != nil {
return "", errors.Wrap(err, "check network namespace closed")
} else if closed {
return "", nil
}
return sandbox.IP
return sandbox.IP, nil
}
// toCRISandboxStatus converts sandbox metadata into CRI pod sandbox status.
@@ -113,6 +118,7 @@ type SandboxInfo struct {
RuntimeOptions interface{} `json:"runtimeOptions"`
Config *runtime.PodSandboxConfig `json:"config"`
RuntimeSpec *runtimespec.Spec `json:"runtimeSpec"`
CNIResult *cni.CNIResult `json:"cniResult"`
}
// toCRISandboxInfo converts internal container object information to CRI sandbox status response info map.
@@ -138,6 +144,7 @@ func toCRISandboxInfo(ctx context.Context, sandbox sandboxstore.Sandbox) (map[st
RuntimeHandler: sandbox.RuntimeHandler,
Status: string(processStatus),
Config: sandbox.Config,
CNIResult: sandbox.CNIResult,
}
if si.Status == "" {
@@ -146,9 +153,13 @@ func toCRISandboxInfo(ctx context.Context, sandbox sandboxstore.Sandbox) (map[st
si.Status = "deleted"
}
if sandbox.NetNSPath != "" {
if sandbox.NetNS != nil {
// Add network closed information if sandbox is not using host network.
si.NetNSClosed = (sandbox.NetNS == nil || sandbox.NetNS.Closed())
closed, err := sandbox.NetNS.Closed()
if err != nil {
return nil, errors.Wrap(err, "failed to check network namespace closed")
}
si.NetNSClosed = closed
}
spec, err := container.Spec(ctx)

View File

@@ -19,7 +19,6 @@ package server
import (
"time"
"github.com/containerd/containerd"
"github.com/containerd/containerd/errdefs"
cni "github.com/containerd/go-cni"
"github.com/pkg/errors"
@@ -43,9 +42,8 @@ func (c *criService) StopPodSandbox(ctx context.Context, r *runtime.StopPodSandb
id := sandbox.ID
// Stop all containers inside the sandbox. This terminates the container forcibly,
// and container may still be so production should not rely on this behavior.
// TODO(random-liu): Delete the sandbox container before this after permanent network namespace
// is introduced, so that no container will be started after that.
// and container may still be created, so production should not rely on this behavior.
// TODO(random-liu): Introduce a state in sandbox to avoid future container creation.
containers := c.containerStore.List()
for _, container := range containers {
if container.SandboxID != id {
@@ -58,27 +56,6 @@ func (c *criService) StopPodSandbox(ctx context.Context, r *runtime.StopPodSandb
}
}
// Teardown network for sandbox.
if sandbox.NetNSPath != "" {
netNSPath := sandbox.NetNSPath
if sandbox.NetNS == nil || sandbox.NetNS.Closed() {
// Use empty netns path if netns is not available. This is defined in:
// https://github.com/containernetworking/cni/blob/v0.7.0-alpha1/SPEC.md
netNSPath = ""
}
if err := c.teardownPod(id, netNSPath, sandbox.Config); err != nil {
return nil, errors.Wrapf(err, "failed to destroy network for sandbox %q", id)
}
// Close the sandbox network namespace if it was created
if sandbox.NetNS != nil {
if err = sandbox.NetNS.Remove(); err != nil {
return nil, errors.Wrapf(err, "failed to remove network namespace for sandbox %q", id)
}
}
}
logrus.Infof("TearDown network for sandbox %q successfully", id)
if err := c.unmountSandboxFiles(id, sandbox.Config); err != nil {
return nil, errors.Wrap(err, "failed to unmount sandbox files")
}
@@ -89,6 +66,27 @@ func (c *criService) StopPodSandbox(ctx context.Context, r *runtime.StopPodSandb
return nil, errors.Wrapf(err, "failed to stop sandbox container %q", id)
}
}
// Teardown network for sandbox.
if sandbox.NetNS != nil {
netNSPath := sandbox.NetNSPath
// Use empty netns path if netns is not available. This is defined in:
// https://github.com/containernetworking/cni/blob/v0.7.0-alpha1/SPEC.md
if closed, err := sandbox.NetNS.Closed(); err != nil {
return nil, errors.Wrap(err, "failed to check network namespace closed")
} else if closed {
netNSPath = ""
}
if err := c.teardownPod(id, netNSPath, sandbox.Config); err != nil {
return nil, errors.Wrapf(err, "failed to destroy network for sandbox %q", id)
}
if err = sandbox.NetNS.Remove(); err != nil {
return nil, errors.Wrapf(err, "failed to remove network namespace for sandbox %q", id)
}
}
logrus.Infof("TearDown network for sandbox %q successfully", id)
return &runtime.StopPodSandboxResponse{}, nil
}
@@ -106,18 +104,8 @@ func (c *criService) stopSandboxContainer(ctx context.Context, sandbox sandboxst
}
// Kill the sandbox container.
if err = task.Kill(ctx, unix.SIGKILL, containerd.WithKillAll); err != nil && !errdefs.IsNotFound(err) {
return errors.Wrap(err, "failed to kill sandbox container")
}
if err = c.waitSandboxStop(ctx, sandbox, killContainerTimeout); err == nil {
return nil
}
logrus.WithError(err).Errorf("An error occurs during waiting for sandbox %q to be killed", sandbox.ID)
// Kill the sandbox container init process.
if err = task.Kill(ctx, unix.SIGKILL); err != nil && !errdefs.IsNotFound(err) {
return errors.Wrap(err, "failed to kill sandbox container init process")
return errors.Wrap(err, "failed to kill sandbox container")
}
return c.waitSandboxStop(ctx, sandbox, killContainerTimeout)
@@ -140,7 +128,7 @@ func (c *criService) waitSandboxStop(ctx context.Context, sandbox sandboxstore.S
// teardownPod removes the network from the pod
func (c *criService) teardownPod(id string, path string, config *runtime.PodSandboxConfig) error {
if c.netPlugin == nil {
return errors.New("cni config not intialized")
return errors.New("cni config not initialized")
}
labels := getPodCNILabels(id, config)
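
The reordered teardown above (stop the sandbox task first, then tear down the network) also follows the CNI convention referenced in the new comment: if the namespace is already closed, the plugin is still invoked, just with an empty netns path so it can clean up its own state. A hedged sketch of that call shape with go-cni; `netPlugin`, the sandbox ID, and the label map are stand-ins.

```go
package main

import (
	"log"

	cni "github.com/containerd/go-cni"
)

// teardownPodNetwork mirrors teardownPod above: when the namespace is already
// closed, pass an empty path per the CNI spec note referenced in the diff.
func teardownPodNetwork(netPlugin cni.CNI, id, nsPath string, nsClosed bool, labels map[string]string) error {
	if nsClosed {
		nsPath = ""
	}
	return netPlugin.Remove(id, nsPath, cni.WithLabels(labels))
}

func main() {
	netPlugin, err := cni.New()
	if err != nil {
		log.Fatal(err)
	}
	if err := teardownPodNetwork(netPlugin, "sandbox-id", "", true, map[string]string{"K8S_POD_NAME": "nginx"}); err != nil {
		log.Fatal(err)
	}
}
```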

View File

@@ -27,7 +27,7 @@ import (
// 1) Metadata is immutable after created.
// 2) Metadata is checkpointed as containerd container label.
// metadataVersion is current version of container metadata.
const metadataVersion = "v1" // nolint
// versionedMetadata is the internal versioned container metadata.
@@ -58,6 +58,9 @@ type Metadata struct {
ImageRef string
// LogPath is the container log path.
LogPath string
// StopSignal is the system call signal that will be sent to the container to exit.
// TODO(random-liu): Add integration test for stop signal.
StopSignal string
}
// MarshalJSON encodes Metadata into bytes in json format.

View File

@@ -19,6 +19,7 @@ package sandbox
import (
"encoding/json"
cni "github.com/containerd/go-cni"
"github.com/pkg/errors"
runtime "k8s.io/kubernetes/pkg/kubelet/apis/cri/runtime/v1alpha2"
)
@@ -56,6 +57,8 @@ type Metadata struct {
IP string
// RuntimeHandler is the runtime handler name of the pod.
RuntimeHandler string
// CNI result
CNIResult *cni.CNIResult
}
// MarshalJSON encodes Metadata into bytes in json format.

View File

@@ -1,132 +0,0 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package sandbox
import (
"os"
"sync"
cnins "github.com/containernetworking/plugins/pkg/ns"
"github.com/docker/docker/pkg/symlink"
"github.com/pkg/errors"
osinterface "github.com/containerd/cri/pkg/os"
)
// The NetNS library assumes only containerd manages the lifecycle of the
// network namespace mount. The only case that netns will be unmounted by
// someone else is node reboot.
// If this assumption is broken, NetNS won't be aware of the external
// unmount, and there will be a state mismatch.
// TODO(random-liu): Don't cache state, always load from the system.
// ErrClosedNetNS is the error returned when network namespace is closed.
var ErrClosedNetNS = errors.New("network namespace is closed")
// NetNS holds network namespace for sandbox
type NetNS struct {
sync.Mutex
ns cnins.NetNS
closed bool
restored bool
}
// NewNetNS creates a network namespace for the sandbox
func NewNetNS() (*NetNS, error) {
netns, err := cnins.NewNS()
if err != nil {
return nil, errors.Wrap(err, "failed to setup network namespace")
}
n := new(NetNS)
n.ns = netns
return n, nil
}
// LoadNetNS loads existing network namespace. It returns ErrClosedNetNS
// if the network namespace has already been closed.
func LoadNetNS(path string) (*NetNS, error) {
ns, err := cnins.GetNS(path)
if err != nil {
if _, ok := err.(cnins.NSPathNotExistErr); ok {
return nil, ErrClosedNetNS
}
if _, ok := err.(cnins.NSPathNotNSErr); ok {
// Do best effort cleanup.
os.RemoveAll(path) // nolint: errcheck
return nil, ErrClosedNetNS
}
return nil, errors.Wrap(err, "failed to load network namespace")
}
return &NetNS{ns: ns, restored: true}, nil
}
// Remove removes network namespace if it exists and not closed. Remove is idempotent,
// meaning it might be invoked multiple times and provides consistent result.
func (n *NetNS) Remove() error {
n.Lock()
defer n.Unlock()
if !n.closed {
err := n.ns.Close()
if err != nil {
return errors.Wrap(err, "failed to close network namespace")
}
n.closed = true
}
if n.restored {
path := n.ns.Path()
// Check netns existence.
if _, err := os.Stat(path); err != nil {
if os.IsNotExist(err) {
return nil
}
return errors.Wrap(err, "failed to stat netns")
}
path, err := symlink.FollowSymlinkInScope(path, "/")
if err != nil {
return errors.Wrap(err, "failed to follow symlink")
}
if err := osinterface.Unmount(path); err != nil && !os.IsNotExist(err) {
return errors.Wrap(err, "failed to umount netns")
}
if err := os.RemoveAll(path); err != nil {
return errors.Wrap(err, "failed to remove netns")
}
n.restored = false
}
return nil
}
// Closed checks whether the network namespace has been closed.
func (n *NetNS) Closed() bool {
n.Lock()
defer n.Unlock()
return n.closed && !n.restored
}
// GetPath returns network namespace path for sandbox container
func (n *NetNS) GetPath() string {
n.Lock()
defer n.Unlock()
return n.ns.Path()
}
// GetNs returns the network namespace handle
func (n *NetNS) GetNs() cnins.NetNS {
n.Lock()
defer n.Unlock()
return n.ns
}

View File

@@ -22,6 +22,7 @@ import (
"github.com/containerd/containerd"
"github.com/docker/docker/pkg/truncindex"
"github.com/containerd/cri/pkg/netns"
"github.com/containerd/cri/pkg/store"
)
@@ -32,10 +33,12 @@ type Sandbox struct {
Metadata
// Status stores the status of the sandbox.
Status StatusStorage
// Container is the containerd sandbox container client
// Container is the containerd sandbox container client.
Container containerd.Container
// CNI network namespace client
NetNS *NetNS
// CNI network namespace client.
// For hostnetwork pod, this is always nil;
// For non hostnetwork pod, this should never be nil.
NetNS *netns.NetNS
// StopCh is used to propagate the stop information of the sandbox.
*store.StopCh
}

View File

@@ -3,7 +3,7 @@ github.com/blang/semver v3.1.0
github.com/BurntSushi/toml a368813c5e648fee92e5f6c30e3944ff9d5e8895
github.com/containerd/cgroups 5e610833b72089b37d0e615de9a92dfc043757c2
github.com/containerd/console c12b1e7919c14469339a5d38f2f8ed9b64a9de23
github.com/containerd/containerd 15f19d7a67fa322e6de0ef4c6a1bf9da0f056554
github.com/containerd/containerd 6937c5a3ba8280edff9e9030767e3b0cb742581c
github.com/containerd/continuity bd77b46c8352f74eb12c85bdc01f4b90f69d66b4
github.com/containerd/fifo 3d5202aec260678c48179c56f40e6f38a095738c
github.com/containerd/go-cni 40bcf8ec8acd7372be1d77031d585d5d8e561c90
@@ -21,11 +21,9 @@ github.com/docker/go-metrics 4ea375f7759c82740c893fc030bc37088d2ec098
github.com/docker/go-units v0.3.1
github.com/docker/spdystream 449fdfce4d962303d702fec724ef0ad181c92528
github.com/emicklei/go-restful v2.2.1
github.com/ghodss/yaml v1.0.0
github.com/godbus/dbus v3
github.com/gogo/googleapis 08a7655d27152912db7aaf4f983275eaf8d128ef
github.com/gogo/protobuf v1.0.0
github.com/golang/glog 44145f04b68cf362d9c4df2182967c2275eaefed
github.com/golang/protobuf v1.1.0
github.com/google/gofuzz 44d81051d367757e1c7c6a5a86423ece9afcf63c
github.com/grpc-ecosystem/go-grpc-prometheus v1.1
@@ -33,15 +31,15 @@ github.com/hashicorp/errwrap 7554cd9344cec97297fa6649b055a8c98c2a1e55
github.com/hashicorp/go-multierror ed905158d87462226a13fe39ddf685ea65f1c11f
github.com/json-iterator/go 1.1.5
github.com/matttproud/golang_protobuf_extensions v1.0.0
github.com/Microsoft/go-winio v0.4.10
github.com/Microsoft/hcsshim v0.7.6
github.com/Microsoft/go-winio v0.4.11
github.com/Microsoft/hcsshim v0.8.2
github.com/modern-go/concurrent 1.0.3
github.com/modern-go/reflect2 1.0.1
github.com/opencontainers/go-digest c9281466c8b2f606084ac71339773efd177436e7
github.com/opencontainers/image-spec v1.0.1
github.com/opencontainers/runc 00dc70017d222b178a002ed30e9321b12647af2d
github.com/opencontainers/runc v1.0.0-rc6
github.com/opencontainers/runtime-spec eba862dc2470385a233c7507392675cbeadf7353
github.com/opencontainers/runtime-tools v0.6.0
github.com/opencontainers/runtime-tools fb101d5d42ab9c040f7d0a004e78336e5d5cb197
github.com/opencontainers/selinux b6fa367ed7f534f9ba25391cc2d467085dbb445a
github.com/pkg/errors v0.8.0
github.com/pmezard/go-difflib v1.0.0
@@ -50,7 +48,7 @@ github.com/prometheus/client_model 99fa1f4be8e564e8a6b613da7fa6f46c9edafc6c
github.com/prometheus/common 89604d197083d4781071d3c65855d24ecfb0a563
github.com/prometheus/procfs cb4147076ac75738c9a7d279075a253c0cc5acbd
github.com/seccomp/libseccomp-golang 32f571b70023028bd57d9288c20efbcb237f3ce0
github.com/sirupsen/logrus v1.0.0
github.com/sirupsen/logrus v1.0.3
github.com/stretchr/testify v1.1.4
github.com/syndtr/gocapability db04d3cc01c8b54962a58ec7e491717d06cfcc16
github.com/tchap/go-patricia v2.2.6
@@ -69,10 +67,12 @@ golang.org/x/time f51c12702a4d776e4c1fa9b0fabab841babae631
google.golang.org/genproto d80a6e20e776b0b17a324d0ba1ab50a39c8e8944
google.golang.org/grpc v1.12.0
gopkg.in/inf.v0 3887ee99ecf07df5b447e9b00d9c0b2adaa9f3e4
gopkg.in/yaml.v2 53feefa2559fb8dfa8d81baad31be332c97d6c77
k8s.io/api kubernetes-1.12.0
k8s.io/apimachinery kubernetes-1.12.0
k8s.io/apiserver kubernetes-1.12.0
k8s.io/client-go kubernetes-1.12.0
k8s.io/kubernetes v1.12.0
k8s.io/utils cd34563cd63c2bd7c6fe88a73c4dcf34ed8a67cb
gopkg.in/yaml.v2 v2.2.1
k8s.io/api kubernetes-1.13.0
k8s.io/apimachinery kubernetes-1.13.0
k8s.io/apiserver kubernetes-1.13.0
k8s.io/client-go kubernetes-1.13.0
k8s.io/klog 8139d8cb77af419532b33dfa7dd09fbc5f1d344f
k8s.io/kubernetes v1.13.0
k8s.io/utils 0d26856f57b32ec3398579285e5c8a2bfe8c5243
sigs.k8s.io/yaml v1.1.0

View File

@@ -1,50 +0,0 @@
The MIT License (MIT)
Copyright (c) 2014 Sam Ghods
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
Copyright (c) 2012 The Go Authors. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following disclaimer
in the documentation and/or other materials provided with the
distribution.
* Neither the name of Google Inc. nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

View File

@@ -1,121 +0,0 @@
# YAML marshaling and unmarshaling support for Go
[![Build Status](https://travis-ci.org/ghodss/yaml.svg)](https://travis-ci.org/ghodss/yaml)
## Introduction
A wrapper around [go-yaml](https://github.com/go-yaml/yaml) designed to enable a better way of handling YAML when marshaling to and from structs.
In short, this library first converts YAML to JSON using go-yaml and then uses `json.Marshal` and `json.Unmarshal` to convert to or from the struct. This means that it effectively reuses the JSON struct tags as well as the custom JSON methods `MarshalJSON` and `UnmarshalJSON` unlike go-yaml. For a detailed overview of the rationale behind this method, [see this blog post](http://ghodss.com/2014/the-right-way-to-handle-yaml-in-golang/).
## Compatibility
This package uses [go-yaml](https://github.com/go-yaml/yaml) and therefore supports [everything go-yaml supports](https://github.com/go-yaml/yaml#compatibility).
## Caveats
**Caveat #1:** When using `yaml.Marshal` and `yaml.Unmarshal`, binary data should NOT be preceded with the `!!binary` YAML tag. If you do, go-yaml will convert the binary data from base64 to native binary data, which is not compatible with JSON. You can still use binary in your YAML files though - just store them without the `!!binary` tag and decode the base64 in your code (e.g. in the custom JSON methods `MarshalJSON` and `UnmarshalJSON`). This also has the benefit that your YAML and your JSON binary data will be decoded exactly the same way. As an example:
```
BAD:
exampleKey: !!binary gIGC
GOOD:
exampleKey: gIGC
... and decode the base64 data in your code.
```
**Caveat #2:** When using `YAMLToJSON` directly, maps with keys that are maps will result in an error since this is not supported by JSON. This error will occur in `Unmarshal` as well since you can't unmarshal map keys anyways since struct fields can't be keys.
## Installation and usage
To install, run:
```
$ go get github.com/ghodss/yaml
```
And import using:
```
import "github.com/ghodss/yaml"
```
Usage is very similar to the JSON library:
```go
package main
import (
"fmt"
"github.com/ghodss/yaml"
)
type Person struct {
Name string `json:"name"` // Affects YAML field names too.
Age int `json:"age"`
}
func main() {
// Marshal a Person struct to YAML.
p := Person{"John", 30}
y, err := yaml.Marshal(p)
if err != nil {
fmt.Printf("err: %v\n", err)
return
}
fmt.Println(string(y))
/* Output:
age: 30
name: John
*/
// Unmarshal the YAML back into a Person struct.
var p2 Person
err = yaml.Unmarshal(y, &p2)
if err != nil {
fmt.Printf("err: %v\n", err)
return
}
fmt.Println(p2)
/* Output:
{John 30}
*/
}
```
`yaml.YAMLToJSON` and `yaml.JSONToYAML` methods are also available:
```go
package main
import (
"fmt"
"github.com/ghodss/yaml"
)
func main() {
j := []byte(`{"name": "John", "age": 30}`)
y, err := yaml.JSONToYAML(j)
if err != nil {
fmt.Printf("err: %v\n", err)
return
}
fmt.Println(string(y))
/* Output:
name: John
age: 30
*/
j2, err := yaml.YAMLToJSON(y)
if err != nil {
fmt.Printf("err: %v\n", err)
return
}
fmt.Println(string(j2))
/* Output:
{"age":30,"name":"John"}
*/
}
```

View File

@@ -1,501 +0,0 @@
// Copyright 2013 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package yaml
import (
"bytes"
"encoding"
"encoding/json"
"reflect"
"sort"
"strings"
"sync"
"unicode"
"unicode/utf8"
)
// indirect walks down v allocating pointers as needed,
// until it gets to a non-pointer.
// if it encounters an Unmarshaler, indirect stops and returns that.
// if decodingNull is true, indirect stops at the last pointer so it can be set to nil.
func indirect(v reflect.Value, decodingNull bool) (json.Unmarshaler, encoding.TextUnmarshaler, reflect.Value) {
// If v is a named type and is addressable,
// start with its address, so that if the type has pointer methods,
// we find them.
if v.Kind() != reflect.Ptr && v.Type().Name() != "" && v.CanAddr() {
v = v.Addr()
}
for {
// Load value from interface, but only if the result will be
// usefully addressable.
if v.Kind() == reflect.Interface && !v.IsNil() {
e := v.Elem()
if e.Kind() == reflect.Ptr && !e.IsNil() && (!decodingNull || e.Elem().Kind() == reflect.Ptr) {
v = e
continue
}
}
if v.Kind() != reflect.Ptr {
break
}
if v.Elem().Kind() != reflect.Ptr && decodingNull && v.CanSet() {
break
}
if v.IsNil() {
if v.CanSet() {
v.Set(reflect.New(v.Type().Elem()))
} else {
v = reflect.New(v.Type().Elem())
}
}
if v.Type().NumMethod() > 0 {
if u, ok := v.Interface().(json.Unmarshaler); ok {
return u, nil, reflect.Value{}
}
if u, ok := v.Interface().(encoding.TextUnmarshaler); ok {
return nil, u, reflect.Value{}
}
}
v = v.Elem()
}
return nil, nil, v
}
// A field represents a single field found in a struct.
type field struct {
name string
nameBytes []byte // []byte(name)
equalFold func(s, t []byte) bool // bytes.EqualFold or equivalent
tag bool
index []int
typ reflect.Type
omitEmpty bool
quoted bool
}
func fillField(f field) field {
f.nameBytes = []byte(f.name)
f.equalFold = foldFunc(f.nameBytes)
return f
}
// byName sorts field by name, breaking ties with depth,
// then breaking ties with "name came from json tag", then
// breaking ties with index sequence.
type byName []field
func (x byName) Len() int { return len(x) }
func (x byName) Swap(i, j int) { x[i], x[j] = x[j], x[i] }
func (x byName) Less(i, j int) bool {
if x[i].name != x[j].name {
return x[i].name < x[j].name
}
if len(x[i].index) != len(x[j].index) {
return len(x[i].index) < len(x[j].index)
}
if x[i].tag != x[j].tag {
return x[i].tag
}
return byIndex(x).Less(i, j)
}
// byIndex sorts field by index sequence.
type byIndex []field
func (x byIndex) Len() int { return len(x) }
func (x byIndex) Swap(i, j int) { x[i], x[j] = x[j], x[i] }
func (x byIndex) Less(i, j int) bool {
for k, xik := range x[i].index {
if k >= len(x[j].index) {
return false
}
if xik != x[j].index[k] {
return xik < x[j].index[k]
}
}
return len(x[i].index) < len(x[j].index)
}
// typeFields returns a list of fields that JSON should recognize for the given type.
// The algorithm is breadth-first search over the set of structs to include - the top struct
// and then any reachable anonymous structs.
func typeFields(t reflect.Type) []field {
// Anonymous fields to explore at the current level and the next.
current := []field{}
next := []field{{typ: t}}
// Count of queued names for current level and the next.
count := map[reflect.Type]int{}
nextCount := map[reflect.Type]int{}
// Types already visited at an earlier level.
visited := map[reflect.Type]bool{}
// Fields found.
var fields []field
for len(next) > 0 {
current, next = next, current[:0]
count, nextCount = nextCount, map[reflect.Type]int{}
for _, f := range current {
if visited[f.typ] {
continue
}
visited[f.typ] = true
// Scan f.typ for fields to include.
for i := 0; i < f.typ.NumField(); i++ {
sf := f.typ.Field(i)
if sf.PkgPath != "" { // unexported
continue
}
tag := sf.Tag.Get("json")
if tag == "-" {
continue
}
name, opts := parseTag(tag)
if !isValidTag(name) {
name = ""
}
index := make([]int, len(f.index)+1)
copy(index, f.index)
index[len(f.index)] = i
ft := sf.Type
if ft.Name() == "" && ft.Kind() == reflect.Ptr {
// Follow pointer.
ft = ft.Elem()
}
// Record found field and index sequence.
if name != "" || !sf.Anonymous || ft.Kind() != reflect.Struct {
tagged := name != ""
if name == "" {
name = sf.Name
}
fields = append(fields, fillField(field{
name: name,
tag: tagged,
index: index,
typ: ft,
omitEmpty: opts.Contains("omitempty"),
quoted: opts.Contains("string"),
}))
if count[f.typ] > 1 {
// If there were multiple instances, add a second,
// so that the annihilation code will see a duplicate.
// It only cares about the distinction between 1 or 2,
// so don't bother generating any more copies.
fields = append(fields, fields[len(fields)-1])
}
continue
}
// Record new anonymous struct to explore in next round.
nextCount[ft]++
if nextCount[ft] == 1 {
next = append(next, fillField(field{name: ft.Name(), index: index, typ: ft}))
}
}
}
}
sort.Sort(byName(fields))
// Delete all fields that are hidden by the Go rules for embedded fields,
// except that fields with JSON tags are promoted.
// The fields are sorted in primary order of name, secondary order
// of field index length. Loop over names; for each name, delete
// hidden fields by choosing the one dominant field that survives.
out := fields[:0]
for advance, i := 0, 0; i < len(fields); i += advance {
// One iteration per name.
// Find the sequence of fields with the name of this first field.
fi := fields[i]
name := fi.name
for advance = 1; i+advance < len(fields); advance++ {
fj := fields[i+advance]
if fj.name != name {
break
}
}
if advance == 1 { // Only one field with this name
out = append(out, fi)
continue
}
dominant, ok := dominantField(fields[i : i+advance])
if ok {
out = append(out, dominant)
}
}
fields = out
sort.Sort(byIndex(fields))
return fields
}
// dominantField looks through the fields, all of which are known to
// have the same name, to find the single field that dominates the
// others using Go's embedding rules, modified by the presence of
// JSON tags. If there are multiple top-level fields, the boolean
// will be false: This condition is an error in Go and we skip all
// the fields.
func dominantField(fields []field) (field, bool) {
// The fields are sorted in increasing index-length order. The winner
// must therefore be one with the shortest index length. Drop all
// longer entries, which is easy: just truncate the slice.
length := len(fields[0].index)
tagged := -1 // Index of first tagged field.
for i, f := range fields {
if len(f.index) > length {
fields = fields[:i]
break
}
if f.tag {
if tagged >= 0 {
// Multiple tagged fields at the same level: conflict.
// Return no field.
return field{}, false
}
tagged = i
}
}
if tagged >= 0 {
return fields[tagged], true
}
// All remaining fields have the same length. If there's more than one,
// we have a conflict (two fields named "X" at the same level) and we
// return no field.
if len(fields) > 1 {
return field{}, false
}
return fields[0], true
}
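The typeFields/dominantField logic above mirrors encoding/json's embedded-field resolution, so the standard library can be used to observe the same rules. A minimal sketch (the struct names are invented for illustration): two tagged fields with the same JSON name at the same depth annihilate each other, while a shallower field dominates a deeper one.
package main

import (
	"encoding/json"
	"fmt"
)

type A struct {
	Name string `json:"name"`
}

type B struct {
	Name string `json:"name"`
}

// Ambiguous: A.Name and B.Name have the same JSON name at the same depth,
// so dominantField reports a conflict and the field is dropped entirely.
type Ambiguous struct {
	A
	B
}

// Shadowed: the outer Name has a shorter index path than A.Name, so it
// dominates, tags notwithstanding.
type Shadowed struct {
	A
	Name string `json:"name"`
}

func main() {
	a, _ := json.Marshal(Ambiguous{A{"from A"}, B{"from B"}})
	fmt.Println(string(a)) // {}

	s, _ := json.Marshal(Shadowed{A{"from A"}, "from outer"})
	fmt.Println(string(s)) // {"name":"from outer"}
}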
var fieldCache struct {
sync.RWMutex
m map[reflect.Type][]field
}
// cachedTypeFields is like typeFields but uses a cache to avoid repeated work.
func cachedTypeFields(t reflect.Type) []field {
fieldCache.RLock()
f := fieldCache.m[t]
fieldCache.RUnlock()
if f != nil {
return f
}
// Compute fields without lock.
// Might duplicate effort but won't hold other computations back.
f = typeFields(t)
if f == nil {
f = []field{}
}
fieldCache.Lock()
if fieldCache.m == nil {
fieldCache.m = map[reflect.Type][]field{}
}
fieldCache.m[t] = f
fieldCache.Unlock()
return f
}
func isValidTag(s string) bool {
if s == "" {
return false
}
for _, c := range s {
switch {
case strings.ContainsRune("!#$%&()*+-./:<=>?@[]^_{|}~ ", c):
// Backslash and quote chars are reserved, but
// otherwise any punctuation chars are allowed
// in a tag name.
default:
if !unicode.IsLetter(c) && !unicode.IsDigit(c) {
return false
}
}
}
return true
}
const (
caseMask = ^byte(0x20) // Mask to ignore case in ASCII.
kelvin = '\u212a'
smallLongEss = '\u017f'
)
// foldFunc returns one of four different case folding equivalence
// functions, from most general (and slow) to fastest:
//
// 1) bytes.EqualFold, if the key s contains any non-ASCII UTF-8
// 2) equalFoldRight, if s contains special folding ASCII ('k', 'K', 's', 'S')
// 3) asciiEqualFold, no special, but includes non-letters (including _)
// 4) simpleLetterEqualFold, no specials, no non-letters.
//
// The letters S and K are special because they map to 3 runes, not just 2:
// * S maps to s and to U+017F 'ſ' Latin small letter long s
// * k maps to K and to U+212A 'K' Kelvin sign
// See http://play.golang.org/p/tTxjOc0OGo
//
// The returned function is specialized for matching against s and
// should only be given s. It's not curried for performance reasons.
func foldFunc(s []byte) func(s, t []byte) bool {
nonLetter := false
special := false // special letter
for _, b := range s {
if b >= utf8.RuneSelf {
return bytes.EqualFold
}
upper := b & caseMask
if upper < 'A' || upper > 'Z' {
nonLetter = true
} else if upper == 'K' || upper == 'S' {
// See above for why these letters are special.
special = true
}
}
if special {
return equalFoldRight
}
if nonLetter {
return asciiEqualFold
}
return simpleLetterEqualFold
}
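The special treatment of 'k' and 's' above exists because U+212A (KELVIN SIGN) case-folds to 'k' and U+017F (LATIN SMALL LETTER LONG S) folds to 's'; bytes.EqualFold already performs those folds, which is what the slower paths below emulate. A quick illustrative check:
package main

import (
	"bytes"
	"fmt"
)

func main() {
	// KELVIN SIGN folds to 'k' and LONG S folds to 's', so a key containing
	// k/K/s/S needs Unicode-aware folding on the other operand.
	fmt.Println(bytes.EqualFold([]byte("kind"), []byte("\u212aIND")))     // true
	fmt.Println(bytes.EqualFold([]byte("status"), []byte("\u017ftatus"))) // true
	// Plain ASCII keys only need byte-wise ASCII folding.
	fmt.Println(bytes.EqualFold([]byte("name"), []byte("NAME"))) // true
}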
// equalFoldRight is a specialization of bytes.EqualFold when s is
// known to be all ASCII (including punctuation), but contains an 's',
// 'S', 'k', or 'K', requiring a Unicode fold on the bytes in t.
// See comments on foldFunc.
func equalFoldRight(s, t []byte) bool {
for _, sb := range s {
if len(t) == 0 {
return false
}
tb := t[0]
if tb < utf8.RuneSelf {
if sb != tb {
sbUpper := sb & caseMask
if 'A' <= sbUpper && sbUpper <= 'Z' {
if sbUpper != tb&caseMask {
return false
}
} else {
return false
}
}
t = t[1:]
continue
}
// sb is ASCII and t is not. t must be either kelvin
// sign or long s; sb must be s, S, k, or K.
tr, size := utf8.DecodeRune(t)
switch sb {
case 's', 'S':
if tr != smallLongEss {
return false
}
case 'k', 'K':
if tr != kelvin {
return false
}
default:
return false
}
t = t[size:]
}
if len(t) > 0 {
return false
}
return true
}
// asciiEqualFold is a specialization of bytes.EqualFold for use when
// s is all ASCII (but may contain non-letters) and contains no
// special-folding letters.
// See comments on foldFunc.
func asciiEqualFold(s, t []byte) bool {
if len(s) != len(t) {
return false
}
for i, sb := range s {
tb := t[i]
if sb == tb {
continue
}
if ('a' <= sb && sb <= 'z') || ('A' <= sb && sb <= 'Z') {
if sb&caseMask != tb&caseMask {
return false
}
} else {
return false
}
}
return true
}
// simpleLetterEqualFold is a specialization of bytes.EqualFold for
// use when s is all ASCII letters (no underscores, etc) and also
// doesn't contain 'k', 'K', 's', or 'S'.
// See comments on foldFunc.
func simpleLetterEqualFold(s, t []byte) bool {
if len(s) != len(t) {
return false
}
for i, b := range s {
if b&caseMask != t[i]&caseMask {
return false
}
}
return true
}
// tagOptions is the string following a comma in a struct field's "json"
// tag, or the empty string. It does not include the leading comma.
type tagOptions string
// parseTag splits a struct field's json tag into its name and
// comma-separated options.
func parseTag(tag string) (string, tagOptions) {
if idx := strings.Index(tag, ","); idx != -1 {
return tag[:idx], tagOptions(tag[idx+1:])
}
return tag, tagOptions("")
}
// Contains reports whether a comma-separated list of options
// contains a particular substr flag. substr must be surrounded by a
// string boundary or commas.
func (o tagOptions) Contains(optionName string) bool {
if len(o) == 0 {
return false
}
s := string(o)
for s != "" {
var next string
i := strings.Index(s, ",")
if i >= 0 {
s, next = s[:i], s[i+1:]
}
if s == optionName {
return true
}
s = next
}
return false
}
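parseTag and tagOptions.Contains are unexported helpers; an equivalent one-off split of a `json` struct tag can be done with the standard library alone. A small sketch (the Spec type and its tag are made up for illustration):
package main

import (
	"fmt"
	"reflect"
	"strings"
)

// splitJSONTag mirrors what parseTag does above: the part before the first
// comma is the name, the rest is a comma-separated option list.
func splitJSONTag(tag string) (name string, opts []string) {
	parts := strings.Split(tag, ",")
	return parts[0], parts[1:]
}

type Spec struct {
	Replicas int `json:"replicas,omitempty,string"`
}

func main() {
	f, _ := reflect.TypeOf(Spec{}).FieldByName("Replicas")
	name, opts := splitJSONTag(f.Tag.Get("json"))
	fmt.Println(name) // replicas
	fmt.Println(opts) // [omitempty string]
}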

277
vendor/github.com/ghodss/yaml/yaml.go generated vendored

@@ -1,277 +0,0 @@
package yaml
import (
"bytes"
"encoding/json"
"fmt"
"reflect"
"strconv"
"gopkg.in/yaml.v2"
)
// Marshals the object into JSON then converts JSON to YAML and returns the
// YAML.
func Marshal(o interface{}) ([]byte, error) {
j, err := json.Marshal(o)
if err != nil {
return nil, fmt.Errorf("error marshaling into JSON: %v", err)
}
y, err := JSONToYAML(j)
if err != nil {
return nil, fmt.Errorf("error converting JSON to YAML: %v", err)
}
return y, nil
}
// Converts YAML to JSON then uses JSON to unmarshal into an object.
func Unmarshal(y []byte, o interface{}) error {
vo := reflect.ValueOf(o)
j, err := yamlToJSON(y, &vo)
if err != nil {
return fmt.Errorf("error converting YAML to JSON: %v", err)
}
err = json.Unmarshal(j, o)
if err != nil {
return fmt.Errorf("error unmarshaling JSON: %v", err)
}
return nil
}
// Convert JSON to YAML.
func JSONToYAML(j []byte) ([]byte, error) {
// Convert the JSON to an object.
var jsonObj interface{}
// We are using yaml.Unmarshal here (instead of json.Unmarshal) because the
// Go JSON library doesn't try to pick the right number type (int, float,
// etc.) when unmarshalling to interface{}; it just picks float64
// universally. go-yaml does go through the effort of picking the right
// number type, so we can preserve number types throughout this process.
err := yaml.Unmarshal(j, &jsonObj)
if err != nil {
return nil, err
}
// Marshal this object into YAML.
return yaml.Marshal(jsonObj)
}
// Convert YAML to JSON. Since JSON is a subset of YAML, passing JSON through
// this method should be a no-op.
//
// Things YAML can do that are not supported by JSON:
// * In YAML you can have binary and null keys in your maps. These are invalid
// in JSON. (int and float keys are converted to strings.)
// * Binary data in YAML with the !!binary tag is not supported. If you want to
// use binary data with this library, encode the data as base64 as usual but do
// not use the !!binary tag in your YAML. This will ensure the original base64
// encoded data makes it all the way through to the JSON.
func YAMLToJSON(y []byte) ([]byte, error) {
return yamlToJSON(y, nil)
}
func yamlToJSON(y []byte, jsonTarget *reflect.Value) ([]byte, error) {
// Convert the YAML to an object.
var yamlObj interface{}
err := yaml.Unmarshal(y, &yamlObj)
if err != nil {
return nil, err
}
// YAML objects are not completely compatible with JSON objects (e.g. you
// can have non-string keys in YAML). So, convert the YAML-compatible object
// to a JSON-compatible object, failing with an error if irrecoverable
// incompatibilities happen along the way.
jsonObj, err := convertToJSONableObject(yamlObj, jsonTarget)
if err != nil {
return nil, err
}
// Convert this object to JSON and return the data.
return json.Marshal(jsonObj)
}
func convertToJSONableObject(yamlObj interface{}, jsonTarget *reflect.Value) (interface{}, error) {
var err error
// Resolve jsonTarget to a concrete value (i.e. not a pointer or an
// interface). We pass decodingNull as false because we're not actually
// decoding into the value, we're just checking if the ultimate target is a
// string.
if jsonTarget != nil {
ju, tu, pv := indirect(*jsonTarget, false)
// We have a JSON or Text Unmarshaler at this level, so we can't be trying
// to decode into a string.
if ju != nil || tu != nil {
jsonTarget = nil
} else {
jsonTarget = &pv
}
}
// If yamlObj is a number or a boolean, check if jsonTarget is a string -
// if so, coerce. Else return normal.
// If yamlObj is a map or array, find the field that each key is
// unmarshaling to, and when you recurse pass the reflect.Value for that
// field back into this function.
switch typedYAMLObj := yamlObj.(type) {
case map[interface{}]interface{}:
// JSON does not support arbitrary keys in a map, so we must convert
// these keys to strings.
//
// From my reading of go-yaml v2 (specifically the resolve function),
// keys can only have the types string, int, int64, float64, binary
// (unsupported), or null (unsupported).
strMap := make(map[string]interface{})
for k, v := range typedYAMLObj {
// Resolve the key to a string first.
var keyString string
switch typedKey := k.(type) {
case string:
keyString = typedKey
case int:
keyString = strconv.Itoa(typedKey)
case int64:
// go-yaml will only return an int64 as a key if the system
// architecture is 32-bit and the key's value is between 32-bit
// and 64-bit. Otherwise the key type will simply be int.
keyString = strconv.FormatInt(typedKey, 10)
case float64:
// Stolen from go-yaml to use the same conversion to string as
// the go-yaml library uses to convert float to string when
// Marshaling.
s := strconv.FormatFloat(typedKey, 'g', -1, 32)
switch s {
case "+Inf":
s = ".inf"
case "-Inf":
s = "-.inf"
case "NaN":
s = ".nan"
}
keyString = s
case bool:
if typedKey {
keyString = "true"
} else {
keyString = "false"
}
default:
return nil, fmt.Errorf("Unsupported map key of type: %s, key: %+#v, value: %+#v",
reflect.TypeOf(k), k, v)
}
// jsonTarget should be a struct or a map. If it's a struct, find
// the field it's going to map to and pass its reflect.Value. If
// it's a map, find the element type of the map and pass the
// reflect.Value created from that type. If it's neither, just pass
// nil - JSON conversion will error for us if it's a real issue.
if jsonTarget != nil {
t := *jsonTarget
if t.Kind() == reflect.Struct {
keyBytes := []byte(keyString)
// Find the field that the JSON library would use.
var f *field
fields := cachedTypeFields(t.Type())
for i := range fields {
ff := &fields[i]
if bytes.Equal(ff.nameBytes, keyBytes) {
f = ff
break
}
// Do case-insensitive comparison.
if f == nil && ff.equalFold(ff.nameBytes, keyBytes) {
f = ff
}
}
if f != nil {
// Find the reflect.Value of the most preferential
// struct field.
jtf := t.Field(f.index[0])
strMap[keyString], err = convertToJSONableObject(v, &jtf)
if err != nil {
return nil, err
}
continue
}
} else if t.Kind() == reflect.Map {
// Create a zero value of the map's element type to use as
// the JSON target.
jtv := reflect.Zero(t.Type().Elem())
strMap[keyString], err = convertToJSONableObject(v, &jtv)
if err != nil {
return nil, err
}
continue
}
}
strMap[keyString], err = convertToJSONableObject(v, nil)
if err != nil {
return nil, err
}
}
return strMap, nil
case []interface{}:
// We need to recurse into arrays in case there are any
// map[interface{}]interface{}'s inside and to convert any
// numbers to strings.
// If jsonTarget is a slice (which it really should be), find the
// thing it's going to map to. If it's not a slice, just pass nil
// - JSON conversion will error for us if it's a real issue.
var jsonSliceElemValue *reflect.Value
if jsonTarget != nil {
t := *jsonTarget
if t.Kind() == reflect.Slice {
// By default slices point to nil, but we need a reflect.Value
// pointing to a value of the slice type, so we create one here.
ev := reflect.Indirect(reflect.New(t.Type().Elem()))
jsonSliceElemValue = &ev
}
}
// Make and use a new array.
arr := make([]interface{}, len(typedYAMLObj))
for i, v := range typedYAMLObj {
arr[i], err = convertToJSONableObject(v, jsonSliceElemValue)
if err != nil {
return nil, err
}
}
return arr, nil
default:
// If the target type is a string and the YAML type is a number,
// convert the YAML type to a string.
if jsonTarget != nil && (*jsonTarget).Kind() == reflect.String {
// Based on my reading of go-yaml, it may return int, int64,
// float64, or uint64.
var s string
switch typedVal := typedYAMLObj.(type) {
case int:
s = strconv.FormatInt(int64(typedVal), 10)
case int64:
s = strconv.FormatInt(typedVal, 10)
case float64:
s = strconv.FormatFloat(typedVal, 'g', -1, 32)
case uint64:
s = strconv.FormatUint(typedVal, 10)
case bool:
if typedVal {
s = "true"
} else {
s = "false"
}
}
if len(s) > 0 {
yamlObj = interface{}(s)
}
}
return yamlObj, nil
}
return nil, nil
}
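A minimal usage sketch of the exported API defined above, showing the two behaviors the comments call out: non-string YAML keys become JSON string keys, and a YAML number is coerced to a string when the ultimate target field is a string (the Config type here is invented for illustration):
package main

import (
	"fmt"

	"github.com/ghodss/yaml"
)

type Config struct {
	// A string target makes yamlToJSON coerce the YAML number into "3".
	Replicas string `json:"replicas"`
}

func main() {
	// Integer keys in YAML come out as string keys in JSON.
	j, err := yaml.YAMLToJSON([]byte("1: one\n2: two\n"))
	if err != nil {
		panic(err)
	}
	fmt.Println(string(j)) // {"1":"one","2":"two"}

	var c Config
	if err := yaml.Unmarshal([]byte("replicas: 3\n"), &c); err != nil {
		panic(err)
	}
	fmt.Printf("%q\n", c.Replicas) // "3"
}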

191
vendor/github.com/golang/glog/LICENSE generated vendored

@@ -1,191 +0,0 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction, and
distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by the copyright
owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all other entities
that control, are controlled by, or are under common control with that entity.
For the purposes of this definition, "control" means (i) the power, direct or
indirect, to cause the direction or management of such entity, whether by
contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity exercising
permissions granted by this License.
"Source" form shall mean the preferred form for making modifications, including
but not limited to software source code, documentation source, and configuration
files.
"Object" form shall mean any form resulting from mechanical transformation or
translation of a Source form, including but not limited to compiled object code,
generated documentation, and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or Object form, made
available under the License, as indicated by a copyright notice that is included
in or attached to the work (an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object form, that
is based on (or derived from) the Work and for which the editorial revisions,
annotations, elaborations, or other modifications represent, as a whole, an
original work of authorship. For the purposes of this License, Derivative Works
shall not include works that remain separable from, or merely link (or bind by
name) to the interfaces of, the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including the original version
of the Work and any modifications or additions to that Work or Derivative Works
thereof, that is intentionally submitted to Licensor for inclusion in the Work
by the copyright owner or by an individual or Legal Entity authorized to submit
on behalf of the copyright owner. For the purposes of this definition,
"submitted" means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems, and
issue tracking systems that are managed by, or on behalf of, the Licensor for
the purpose of discussing and improving the Work, but excluding communication
that is conspicuously marked or otherwise designated in writing by the copyright
owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity on behalf
of whom a Contribution has been received by Licensor and subsequently
incorporated within the Work.
2. Grant of Copyright License.
Subject to the terms and conditions of this License, each Contributor hereby
grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free,
irrevocable copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the Work and such
Derivative Works in Source or Object form.
3. Grant of Patent License.
Subject to the terms and conditions of this License, each Contributor hereby
grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free,
irrevocable (except as stated in this section) patent license to make, have
made, use, offer to sell, sell, import, and otherwise transfer the Work, where
such license applies only to those patent claims licensable by such Contributor
that are necessarily infringed by their Contribution(s) alone or by combination
of their Contribution(s) with the Work to which such Contribution(s) was
submitted. If You institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work or a
Contribution incorporated within the Work constitutes direct or contributory
patent infringement, then any patent licenses granted to You under this License
for that Work shall terminate as of the date such litigation is filed.
4. Redistribution.
You may reproduce and distribute copies of the Work or Derivative Works thereof
in any medium, with or without modifications, and in Source or Object form,
provided that You meet the following conditions:
You must give any other recipients of the Work or Derivative Works a copy of
this License; and
You must cause any modified files to carry prominent notices stating that You
changed the files; and
You must retain, in the Source form of any Derivative Works that You distribute,
all copyright, patent, trademark, and attribution notices from the Source form
of the Work, excluding those notices that do not pertain to any part of the
Derivative Works; and
If the Work includes a "NOTICE" text file as part of its distribution, then any
Derivative Works that You distribute must include a readable copy of the
attribution notices contained within such NOTICE file, excluding those notices
that do not pertain to any part of the Derivative Works, in at least one of the
following places: within a NOTICE text file distributed as part of the
Derivative Works; within the Source form or documentation, if provided along
with the Derivative Works; or, within a display generated by the Derivative
Works, if and wherever such third-party notices normally appear. The contents of
the NOTICE file are for informational purposes only and do not modify the
License. You may add Your own attribution notices within Derivative Works that
You distribute, alongside or as an addendum to the NOTICE text from the Work,
provided that such additional attribution notices cannot be construed as
modifying the License.
You may add Your own copyright statement to Your modifications and may provide
additional or different license terms and conditions for use, reproduction, or
distribution of Your modifications, or for any such Derivative Works as a whole,
provided Your use, reproduction, and distribution of the Work otherwise complies
with the conditions stated in this License.
5. Submission of Contributions.
Unless You explicitly state otherwise, any Contribution intentionally submitted
for inclusion in the Work by You to the Licensor shall be under the terms and
conditions of this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify the terms of
any separate license agreement you may have executed with Licensor regarding
such Contributions.
6. Trademarks.
This License does not grant permission to use the trade names, trademarks,
service marks, or product names of the Licensor, except as required for
reasonable and customary use in describing the origin of the Work and
reproducing the content of the NOTICE file.
7. Disclaimer of Warranty.
Unless required by applicable law or agreed to in writing, Licensor provides the
Work (and each Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied,
including, without limitation, any warranties or conditions of TITLE,
NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are
solely responsible for determining the appropriateness of using or
redistributing the Work and assume any risks associated with Your exercise of
permissions under this License.
8. Limitation of Liability.
In no event and under no legal theory, whether in tort (including negligence),
contract, or otherwise, unless required by applicable law (such as deliberate
and grossly negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special, incidental,
or consequential damages of any character arising as a result of this License or
out of the use or inability to use the Work (including but not limited to
damages for loss of goodwill, work stoppage, computer failure or malfunction, or
any and all other commercial damages or losses), even if such Contributor has
been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability.
While redistributing the Work or Derivative Works thereof, You may choose to
offer, and charge a fee for, acceptance of support, warranty, indemnity, or
other liability obligations and/or rights consistent with this License. However,
in accepting such obligations, You may act only on Your own behalf and on Your
sole responsibility, not on behalf of any other Contributor, and only if You
agree to indemnify, defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason of your
accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work
To apply the Apache License to your work, attach the following boilerplate
notice, with the fields enclosed by brackets "[]" replaced with your own
identifying information. (Don't include the brackets!) The text should be
enclosed in the appropriate comment syntax for the file format. We also
recommend that a file or class name and description of purpose be included on
the same "printed page" as the copyright notice for easier identification within
third-party archives.
Copyright [yyyy] [name of copyright owner]
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

44
vendor/github.com/golang/glog/README generated vendored

@@ -1,44 +0,0 @@
glog
====
Leveled execution logs for Go.
This is an efficient pure Go implementation of leveled logs in the
manner of the open source C++ package
http://code.google.com/p/google-glog
By binding methods to booleans it is possible to use the log package
without paying the expense of evaluating the arguments to the log.
Through the -vmodule flag, the package also provides fine-grained
control over logging at the file level.
The comment from glog.go introduces the ideas:
Package glog implements logging analogous to the Google-internal
C++ INFO/ERROR/V setup. It provides functions Info, Warning,
Error, Fatal, plus formatting variants such as Infof. It
also provides V-style logging controlled by the -v and
-vmodule=file=2 flags.
Basic examples:
glog.Info("Prepare to repel boarders")
glog.Fatalf("Initialization failed: %s", err)
See the documentation for the V function for an explanation
of these examples:
if glog.V(2) {
glog.Info("Starting transaction...")
}
glog.V(2).Infoln("Processed", nItems, "elements")
The repository contains an open source version of the log package
used inside Google. The master copy of the source lives inside
Google, not here. The code in this repo is for export only and is not itself
under development. Feature requests will be ignored.
Send bug reports to golang-nuts@googlegroups.com.
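A runnable version of the README's examples, assuming the import path of the vendored copy; glog registers its flags (-v, -vmodule, -logtostderr, ...) on the default flag set, so they must be parsed before logging, and buffered output should be flushed on exit:
package main

import (
	"flag"

	"github.com/golang/glog"
)

func main() {
	flag.Parse() // e.g. run with: -logtostderr -v=2
	defer glog.Flush()

	glog.Info("Prepare to repel boarders")

	if glog.V(2) {
		glog.Info("Starting transaction...")
	}
	glog.V(2).Infoln("Processed", 42, "elements")
}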

1177
vendor/github.com/golang/glog/glog.go generated vendored

File diff suppressed because it is too large.

vendor/github.com/golang/glog/glog_file.go generated vendored

@@ -1,124 +0,0 @@
// Go support for leveled logs, analogous to https://code.google.com/p/google-glog/
//
// Copyright 2013 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// File I/O for logs.
package glog
import (
"errors"
"flag"
"fmt"
"os"
"os/user"
"path/filepath"
"strings"
"sync"
"time"
)
// MaxSize is the maximum size of a log file in bytes.
var MaxSize uint64 = 1024 * 1024 * 1800
// logDirs lists the candidate directories for new log files.
var logDirs []string
// If non-empty, overrides the choice of directory in which to write logs.
// See createLogDirs for the full list of possible destinations.
var logDir = flag.String("log_dir", "", "If non-empty, write log files in this directory")
func createLogDirs() {
if *logDir != "" {
logDirs = append(logDirs, *logDir)
}
logDirs = append(logDirs, os.TempDir())
}
var (
pid = os.Getpid()
program = filepath.Base(os.Args[0])
host = "unknownhost"
userName = "unknownuser"
)
func init() {
h, err := os.Hostname()
if err == nil {
host = shortHostname(h)
}
current, err := user.Current()
if err == nil {
userName = current.Username
}
// Sanitize userName since it may contain filepath separators on Windows.
userName = strings.Replace(userName, `\`, "_", -1)
}
// shortHostname returns its argument, truncating at the first period.
// For instance, given "www.google.com" it returns "www".
func shortHostname(hostname string) string {
if i := strings.Index(hostname, "."); i >= 0 {
return hostname[:i]
}
return hostname
}
// logName returns a new log file name containing tag, with start time t, and
// the name for the symlink for tag.
func logName(tag string, t time.Time) (name, link string) {
name = fmt.Sprintf("%s.%s.%s.log.%s.%04d%02d%02d-%02d%02d%02d.%d",
program,
host,
userName,
tag,
t.Year(),
t.Month(),
t.Day(),
t.Hour(),
t.Minute(),
t.Second(),
pid)
return name, program + "." + tag
}
var onceLogDirs sync.Once
// create creates a new log file and returns the file and its filename, which
// contains tag ("INFO", "FATAL", etc.) and t. If the file is created
// successfully, create also attempts to update the symlink for that tag, ignoring
// errors.
func create(tag string, t time.Time) (f *os.File, filename string, err error) {
onceLogDirs.Do(createLogDirs)
if len(logDirs) == 0 {
return nil, "", errors.New("log: no log dirs")
}
name, link := logName(tag, t)
var lastErr error
for _, dir := range logDirs {
fname := filepath.Join(dir, name)
f, err := os.Create(fname)
if err == nil {
symlink := filepath.Join(dir, link)
os.Remove(symlink) // ignore err
os.Symlink(name, symlink) // ignore err
return f, fname, nil
}
lastErr = err
}
return nil, "", fmt.Errorf("log: cannot create log: %v", lastErr)
}
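For reference, the same Sprintf pattern logName uses, evaluated with invented values, to show what the generated file name and its per-tag symlink look like:
package main

import (
	"fmt"
	"time"
)

func main() {
	// All values below are made up; in glog they come from os.Args, the
	// hostname, the current user, the log tag and the process ID.
	program, host, userName, tag, pid := "containerd", "node-1", "root", "INFO", 4242
	t := time.Date(2019, 1, 2, 10, 50, 25, 0, time.UTC)

	name := fmt.Sprintf("%s.%s.%s.log.%s.%04d%02d%02d-%02d%02d%02d.%d",
		program, host, userName, tag,
		t.Year(), t.Month(), t.Day(), t.Hour(), t.Minute(), t.Second(), pid)
	link := program + "." + tag

	fmt.Println(name) // containerd.node-1.root.log.INFO.20190102-105025.4242
	fmt.Println(link) // containerd.INFO
}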