Merge pull request #2702 from Random-Liu/update-cri-release-1.2

Update cri to 8506fe836677cc3bb23a16b68145128243d843b5.

Commit 15f19d7a67
@@ -21,7 +21,7 @@
 set -eu -o pipefail

 go get -u github.com/onsi/ginkgo/ginkgo
-CRITEST_COMMIT=f37a5a1edb69ee742c6e42c57413fe6b63788085
+CRITEST_COMMIT=v1.12.0
 go get -d github.com/kubernetes-incubator/cri-tools/...
 cd $GOPATH/src/github.com/kubernetes-incubator/cri-tools
 git checkout $CRITEST_COMMIT
vendor.conf (14 changes)

@@ -43,7 +43,7 @@ github.com/google/go-cmp v0.1.0
 go.etcd.io/bbolt v1.3.1-etcd.8

 # cri dependencies
-github.com/containerd/cri 9f39e3289533fc228c5e5fcac0a6dbdd60c6047b # release/1.2 branch
+github.com/containerd/cri 8506fe836677cc3bb23a16b68145128243d843b5 # release/1.2 branch
 github.com/containerd/go-cni 6d7b509a054a3cb1c35ed1865d4fde2f0cb547cd
 github.com/blang/semver v3.1.0
 github.com/containernetworking/cni v0.6.0
@@ -73,12 +73,12 @@ golang.org/x/oauth2 a6bd8cefa1811bd24b86f8902872e4e8225f74c4
 golang.org/x/time f51c12702a4d776e4c1fa9b0fabab841babae631
 gopkg.in/inf.v0 3887ee99ecf07df5b447e9b00d9c0b2adaa9f3e4
 gopkg.in/yaml.v2 v2.2.1
-k8s.io/api 012f271b5d41baad56190c5f1ae19bff16df0fd8
-k8s.io/apimachinery 6429050ef506887d121f3e7306e894f8900d8a63
-k8s.io/apiserver e9312c15296b6c2c923ebd5031ff5d1d5fd022d7
-k8s.io/client-go 37c3c02ec96533daec0dbda1f39a6b1d68505c79
-k8s.io/kubernetes v1.12.0-beta.1
-k8s.io/utils 982821ea41da7e7c15f3d3738921eb2e7e241ccd
+k8s.io/api kubernetes-1.12.0
+k8s.io/apimachinery kubernetes-1.12.0
+k8s.io/apiserver kubernetes-1.12.0
+k8s.io/client-go kubernetes-1.12.0
+k8s.io/kubernetes v1.12.0
+k8s.io/utils cd34563cd63c2bd7c6fe88a73c4dcf34ed8a67cb

 # zfs dependencies
 github.com/containerd/zfs 9a0b8b8b5982014b729cd34eb7cd7a11062aa6ec
vendor/github.com/containerd/cri/pkg/server/events.go (generated, vendored; 38 changes)

@@ -34,7 +34,6 @@ import (
 	ctrdutil "github.com/containerd/cri/pkg/containerd/util"
 	"github.com/containerd/cri/pkg/store"
 	containerstore "github.com/containerd/cri/pkg/store/container"
-	imagestore "github.com/containerd/cri/pkg/store/image"
 	sandboxstore "github.com/containerd/cri/pkg/store/sandbox"
 )

@@ -42,15 +41,19 @@ const (
 	backOffInitDuration        = 1 * time.Second
 	backOffMaxDuration         = 5 * time.Minute
 	backOffExpireCheckDuration = 1 * time.Second
+
+	// handleEventTimeout is the timeout for handling 1 event. Event monitor
+	// handles events in serial, if one event blocks the event monitor, no
+	// other events can be handled.
+	// Add a timeout for each event handling, events that timeout will be requeued and
+	// handled again in the future.
+	handleEventTimeout = 10 * time.Second
 )

 // eventMonitor monitors containerd event and updates internal state correspondingly.
 // TODO(random-liu): [P1] Figure out is it possible to drop event during containerd
 // is running. If it is, we should do periodically list to sync state with containerd.
 // TODO(random-liu): Handle event for each container in a separate goroutine.
 type eventMonitor struct {
-	containerStore *containerstore.Store
-	sandboxStore   *sandboxstore.Store
-	imageStore     *imagestore.Store
+	c      *criService
 	ch     <-chan *events.Envelope
 	errCh  <-chan error
 	ctx    context.Context
@@ -78,13 +81,11 @@ type backOffQueue struct {

 // Create new event monitor. New event monitor will start subscribing containerd event. All events
 // happen after it should be monitored.
-func newEventMonitor(c *containerstore.Store, s *sandboxstore.Store, i *imagestore.Store) *eventMonitor {
+func newEventMonitor(c *criService) *eventMonitor {
 	// event subscribe doesn't need namespace.
 	ctx, cancel := context.WithCancel(context.Background())
 	return &eventMonitor{
-		containerStore: c,
-		sandboxStore:   s,
-		imageStore:     i,
+		c:       c,
 		ctx:     ctx,
 		cancel:  cancel,
 		backOff: newBackOff(),
@@ -189,6 +190,9 @@ func (em *eventMonitor) stop() {
 // handleEvent handles a containerd event.
 func (em *eventMonitor) handleEvent(any interface{}) error {
+	ctx := ctrdutil.NamespacedContext()
+	ctx, cancel := context.WithTimeout(ctx, handleEventTimeout)
+	defer cancel()
+
 	switch any.(type) {
 	// If containerd-shim exits unexpectedly, there will be no corresponding event.
 	// However, containerd could not retrieve container state in that case, so it's
@@ -197,7 +201,7 @@ func (em *eventMonitor) handleEvent(any interface{}) error {
 	case *eventtypes.TaskExit:
 		e := any.(*eventtypes.TaskExit)
 		logrus.Infof("TaskExit event %+v", e)
-		cntr, err := em.containerStore.Get(e.ContainerID)
+		cntr, err := em.c.containerStore.Get(e.ContainerID)
 		if err == nil {
 			if err := handleContainerExit(ctx, e, cntr); err != nil {
 				return errors.Wrap(err, "failed to handle container TaskExit event")
@@ -207,7 +211,7 @@ func (em *eventMonitor) handleEvent(any interface{}) error {
 			return errors.Wrap(err, "can't find container for TaskExit event")
 		}
 		// Use GetAll to include sandbox in unknown state.
-		sb, err := em.sandboxStore.GetAll(e.ContainerID)
+		sb, err := em.c.sandboxStore.GetAll(e.ContainerID)
 		if err == nil {
 			if err := handleSandboxExit(ctx, e, sb); err != nil {
 				return errors.Wrap(err, "failed to handle sandbox TaskExit event")
@@ -220,12 +224,12 @@ func (em *eventMonitor) handleEvent(any interface{}) error {
 	case *eventtypes.TaskOOM:
 		e := any.(*eventtypes.TaskOOM)
 		logrus.Infof("TaskOOM event %+v", e)
-		cntr, err := em.containerStore.Get(e.ContainerID)
+		cntr, err := em.c.containerStore.Get(e.ContainerID)
 		if err != nil {
 			if err != store.ErrNotExist {
 				return errors.Wrap(err, "can't find container for TaskOOM event")
 			}
-			if _, err = em.sandboxStore.Get(e.ContainerID); err != nil {
+			if _, err = em.c.sandboxStore.Get(e.ContainerID); err != nil {
 				if err != store.ErrNotExist {
 					return errors.Wrap(err, "can't find sandbox for TaskOOM event")
 				}
@@ -243,15 +247,15 @@ func (em *eventMonitor) handleEvent(any interface{}) error {
 	case *eventtypes.ImageCreate:
 		e := any.(*eventtypes.ImageCreate)
 		logrus.Infof("ImageCreate event %+v", e)
-		return em.imageStore.Update(ctx, e.Name)
+		return em.c.updateImage(ctx, e.Name)
 	case *eventtypes.ImageUpdate:
 		e := any.(*eventtypes.ImageUpdate)
 		logrus.Infof("ImageUpdate event %+v", e)
-		return em.imageStore.Update(ctx, e.Name)
+		return em.c.updateImage(ctx, e.Name)
 	case *eventtypes.ImageDelete:
 		e := any.(*eventtypes.ImageDelete)
 		logrus.Infof("ImageDelete event %+v", e)
-		return em.imageStore.Update(ctx, e.Name)
+		return em.c.updateImage(ctx, e.Name)
 	}

 	return nil
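The handleEvent change above gives every event its own namespaced, time-limited context so that a single stuck event cannot wedge the serial event monitor. A minimal standalone sketch of that per-event deadline pattern (handleOne and eventCh are illustrative names, not from this diff; in the real plugin a timed-out event is requeued with backoff):

```go
package main

import (
	"context"
	"fmt"
	"time"
)

const handleEventTimeout = 10 * time.Second

// handleOne stands in for the plugin's per-event handler.
func handleOne(ctx context.Context, ev string) error {
	select {
	case <-time.After(50 * time.Millisecond): // pretend work
		return nil
	case <-ctx.Done():
		return ctx.Err() // caller would requeue this event with backoff
	}
}

func main() {
	eventCh := make(chan string, 2)
	eventCh <- "TaskExit"
	eventCh <- "TaskOOM"
	close(eventCh)

	for ev := range eventCh {
		// Each event gets its own bounded context so one slow event
		// cannot stall the serial loop indefinitely.
		ctx, cancel := context.WithTimeout(context.Background(), handleEventTimeout)
		err := handleOne(ctx, ev)
		cancel()
		fmt.Println(ev, err)
	}
}
```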
vendor/github.com/containerd/cri/pkg/server/helpers.go (generated, vendored; 4 changes)

@@ -105,6 +105,10 @@ const (
 	containerKindSandbox = "sandbox"
 	// containerKindContainer is a label value indicating container is application container
 	containerKindContainer = "container"
+	// imageLabelKey is the label key indicating the image is managed by cri plugin.
+	imageLabelKey = criContainerdPrefix + ".image"
+	// imageLabelValue is the label value indicating the image is managed by cri plugin.
+	imageLabelValue = "managed"
 	// sandboxMetadataExtension is an extension name that identify metadata of sandbox in CreateContainerRequest
 	sandboxMetadataExtension = criContainerdPrefix + ".sandbox.metadata"
 	// containerMetadataExtension is an extension name that identify metadata of container in CreateContainerRequest
vendor/github.com/containerd/cri/pkg/server/image_load.go (generated, vendored; 7 changes)

@@ -44,8 +44,11 @@ func (c *criService) LoadImage(ctx context.Context, r *api.LoadImageRequest) (*a
 	}
 	for _, repoTag := range repoTags {
-		// Update image store to reflect the newest state in containerd.
-		if err := c.imageStore.Update(ctx, repoTag); err != nil {
-			return nil, errors.Wrapf(err, "failed to update image store %q", repoTag)
+		// Image imported by importer.Import is not treated as managed
+		// by the cri plugin, call `updateImage` to make it managed.
+		// TODO(random-liu): Replace this with the containerd library (issue #909).
+		if err := c.updateImage(ctx, repoTag); err != nil {
+			return nil, errors.Wrapf(err, "update image store %q", repoTag)
 		}
 		logrus.Debugf("Imported image %q", repoTag)
 	}
vendor/github.com/containerd/cri/pkg/server/image_pull.go (generated, vendored; 53 changes)

@@ -114,14 +114,16 @@ func (c *criService) PullImage(ctx context.Context, r *runtime.PullImageRequest)
 	imageID := configDesc.Digest.String()

 	repoDigest, repoTag := getRepoDigestAndTag(namedRef, image.Target().Digest, isSchema1)
-	for _, r := range []string{repoTag, repoDigest} {
+	for _, r := range []string{imageID, repoTag, repoDigest} {
 		if r == "" {
 			continue
 		}
 		if err := c.createImageReference(ctx, r, image.Target()); err != nil {
-			return nil, errors.Wrapf(err, "failed to update image reference %q", r)
+			return nil, errors.Wrapf(err, "failed to create image reference %q", r)
 		}
 		// Update image store to reflect the newest state in containerd.
+		// No need to use `updateImage`, because the image reference must
+		// have been managed by the cri plugin.
 		if err := c.imageStore.Update(ctx, r); err != nil {
 			return nil, errors.Wrapf(err, "failed to update image store %q", r)
 		}
@@ -174,18 +176,55 @@ func (c *criService) createImageReference(ctx context.Context, name string, desc
 	img := containerdimages.Image{
 		Name:   name,
 		Target: desc,
+		// Add a label to indicate that the image is managed by the cri plugin.
+		Labels: map[string]string{imageLabelKey: imageLabelValue},
 	}
 	// TODO(random-liu): Figure out which is the more performant sequence create then update or
 	// update then create.
-	_, err := c.client.ImageService().Create(ctx, img)
-	if err == nil {
-		return nil
-	}
-	if !errdefs.IsAlreadyExists(err) {
-		return err
-	}
-	_, err = c.client.ImageService().Update(ctx, img, "target")
+	oldImg, err := c.client.ImageService().Create(ctx, img)
+	if err == nil || !errdefs.IsAlreadyExists(err) {
+		return err
+	}
+	if oldImg.Target.Digest == img.Target.Digest && oldImg.Labels[imageLabelKey] == imageLabelValue {
+		return nil
+	}
+	_, err = c.client.ImageService().Update(ctx, img, "target", "labels")
 	return err
 }
+
+// updateImage updates image store to reflect the newest state of an image reference
+// in containerd. If the reference is not managed by the cri plugin, the function also
+// generates necessary metadata for the image and make it managed.
+func (c *criService) updateImage(ctx context.Context, r string) error {
+	img, err := c.client.GetImage(ctx, r)
+	if err != nil && !errdefs.IsNotFound(err) {
+		return errors.Wrap(err, "get image by reference")
+	}
+	if err == nil && img.Labels()[imageLabelKey] != imageLabelValue {
+		// Make sure the image has the image id as its unique
+		// identifier that references the image in its lifetime.
+		configDesc, err := img.Config(ctx)
+		if err != nil {
+			return errors.Wrap(err, "get image id")
+		}
+		id := configDesc.Digest.String()
+		if err := c.createImageReference(ctx, id, img.Target()); err != nil {
+			return errors.Wrapf(err, "create image id reference %q", id)
+		}
+		if err := c.imageStore.Update(ctx, id); err != nil {
+			return errors.Wrapf(err, "update image store for %q", id)
+		}
+		// The image id is ready, add the label to mark the image as managed.
+		if err := c.createImageReference(ctx, r, img.Target()); err != nil {
+			return errors.Wrap(err, "create managed label")
+		}
+	}
+	// If the image is not found, we should continue updating the cache,
+	// so that the image can be removed from the cache.
+	if err := c.imageStore.Update(ctx, r); err != nil {
+		return errors.Wrapf(err, "update image store for %q", r)
+	}
+	return nil
+}

 // credentials returns a credential function for docker resolver to use.
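The reworked createImageReference tries Create first and falls back to updating the "target" and "labels" fields when the name already exists, so the stored reference and its managed label converge on the pulled image. A sketch of that create-or-update flow against containerd's image store; the standalone function signature and the expanded label key (criContainerdPrefix + ".image" in the plugin) are written out here for illustration:

```go
package crisketch

import (
	"context"

	"github.com/containerd/containerd/errdefs"
	"github.com/containerd/containerd/images"
	ocispec "github.com/opencontainers/image-spec/specs-go/v1"
)

const (
	imageLabelKey   = "io.cri-containerd.image" // expanded form of criContainerdPrefix + ".image"
	imageLabelValue = "managed"
)

// createImageReference mirrors the flow in the diff: create the reference
// with the managed label; if the name already exists, update its target
// and labels so both stay in sync with what was just pulled.
func createImageReference(ctx context.Context, store images.Store, name string, desc ocispec.Descriptor) error {
	img := images.Image{
		Name:   name,
		Target: desc,
		Labels: map[string]string{imageLabelKey: imageLabelValue},
	}
	oldImg, err := store.Create(ctx, img)
	if err == nil || !errdefs.IsAlreadyExists(err) {
		// Created, or failed for a reason other than "already exists".
		return err
	}
	if oldImg.Target.Digest == img.Target.Digest && oldImg.Labels[imageLabelKey] == imageLabelValue {
		// Existing record already matches; nothing to do.
		return nil
	}
	// Reference exists but points elsewhere or lacks the label: update in place.
	_, err = store.Update(ctx, img, "target", "labels")
	return err
}
```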
vendor/github.com/containerd/cri/pkg/server/restart.go (generated, vendored; 32 changes)

@@ -36,7 +36,6 @@ import (

 	cio "github.com/containerd/cri/pkg/server/io"
 	containerstore "github.com/containerd/cri/pkg/store/container"
-	imagestore "github.com/containerd/cri/pkg/store/image"
 	sandboxstore "github.com/containerd/cri/pkg/store/sandbox"
 )

@@ -96,7 +95,7 @@ func (c *criService) recover(ctx context.Context) error {
 	if err != nil {
 		return errors.Wrap(err, "failed to list images")
 	}
-	loadImages(ctx, c.imageStore, cImages, c.config.ContainerdConfig.Snapshotter)
+	c.loadImages(ctx, cImages)

 	// It's possible that containerd containers are deleted unexpectedly. In that case,
 	// we can't even get metadata, we should cleanup orphaned sandbox/container directories
@@ -136,8 +135,23 @@ func (c *criService) recover(ctx context.Context) error {
 	return nil
 }

+// loadContainerTimeout is the default timeout for loading a container/sandbox.
+// One container/sandbox hangs (e.g. containerd#2438) should not affect other
+// containers/sandboxes.
+// Most CRI container/sandbox related operations are per container, the ones
+// which handle multiple containers at a time are:
+// * ListPodSandboxes: Don't talk with containerd services.
+// * ListContainers: Don't talk with containerd services.
+// * ListContainerStats: Not in critical code path, a default timeout will
+//   be applied at CRI level.
+// * Recovery logic: We should set a time for each container/sandbox recovery.
+// * Event montior: We should set a timeout for each container/sandbox event handling.
+const loadContainerTimeout = 10 * time.Second
+
 // loadContainer loads container from containerd and status checkpoint.
 func (c *criService) loadContainer(ctx context.Context, cntr containerd.Container) (containerstore.Container, error) {
+	ctx, cancel := context.WithTimeout(ctx, loadContainerTimeout)
+	defer cancel()
 	id := cntr.ID()
 	containerDir := c.getContainerRootDir(id)
 	volatileContainerDir := c.getVolatileContainerRootDir(id)
@@ -290,9 +304,9 @@ const (
 // unknownContainerStatus returns the default container status when its status is unknown.
 func unknownContainerStatus() containerstore.Status {
 	return containerstore.Status{
-		CreatedAt:  time.Now().UnixNano(),
-		StartedAt:  time.Now().UnixNano(),
-		FinishedAt: time.Now().UnixNano(),
+		CreatedAt:  0,
+		StartedAt:  0,
+		FinishedAt: 0,
 		ExitCode:   unknownExitCode,
 		Reason:     unknownExitReason,
 	}
@@ -300,6 +314,8 @@ func unknownContainerStatus() containerstore.Status {

 // loadSandbox loads sandbox from containerd.
 func loadSandbox(ctx context.Context, cntr containerd.Container) (sandboxstore.Sandbox, error) {
+	ctx, cancel := context.WithTimeout(ctx, loadContainerTimeout)
+	defer cancel()
 	var sandbox sandboxstore.Sandbox
 	// Load sandbox metadata.
 	exts, err := cntr.Extensions(ctx)
@@ -394,8 +410,8 @@ func loadSandbox(ctx context.Context, cntr containerd.Container) (sandboxstore.S
 }

 // loadImages loads images from containerd.
-func loadImages(ctx context.Context, store *imagestore.Store, cImages []containerd.Image,
-	snapshotter string) {
+func (c *criService) loadImages(ctx context.Context, cImages []containerd.Image) {
+	snapshotter := c.config.ContainerdConfig.Snapshotter
 	for _, i := range cImages {
 		ok, _, _, _, err := containerdimages.Check(ctx, i.ContentStore(), i.Target(), platforms.Default())
 		if err != nil {
@@ -416,7 +432,7 @@ func loadImages(ctx context.Context, store *imagestore.Store, cImages []containe
 			logrus.Warnf("The image %s is not unpacked.", i.Name())
 			// TODO(random-liu): Consider whether we should try unpack here.
 		}
-		if err := store.Update(ctx, i.Name()); err != nil {
+		if err := c.updateImage(ctx, i.Name()); err != nil {
 			logrus.WithError(err).Warnf("Failed to update reference for image %q", i.Name())
 			continue
 		}
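The recovery path now wraps each container and sandbox load in its own loadContainerTimeout context, so one hung load (a stuck shim, see containerd#2438) cannot stall recovery of everything else. A minimal sketch of the same idea, with recoverAll and loadOne as hypothetical stand-ins for the plugin's recovery loop:

```go
package recoverysketch

import (
	"context"
	"log"
	"time"
)

const loadContainerTimeout = 10 * time.Second

// recoverAll loads each container under its own bounded context so that a
// single hung load only costs loadContainerTimeout, not the whole recovery.
func recoverAll(ctx context.Context, ids []string, loadOne func(context.Context, string) error) {
	for _, id := range ids {
		cctx, cancel := context.WithTimeout(ctx, loadContainerTimeout)
		if err := loadOne(cctx, id); err != nil {
			log.Printf("failed to load container %q: %v", id, err)
		}
		cancel()
	}
}
```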
vendor/github.com/containerd/cri/pkg/server/service.go (generated, vendored; 2 changes)

@@ -157,7 +157,7 @@ func NewCRIService(config criconfig.Config, client *containerd.Client) (CRIServi
 		return nil, errors.Wrap(err, "failed to create stream server")
 	}

-	c.eventMonitor = newEventMonitor(c.containerStore, c.sandboxStore, c.imageStore)
+	c.eventMonitor = newEventMonitor(c)

 	return c, nil
 }
vendor/github.com/containerd/cri/pkg/store/image/fake_image.go (generated, vendored; 2 changes)

@@ -27,7 +27,7 @@ func NewFakeStore(images []Image) (*Store, error) {
 			s.refCache[ref] = i.ID
 		}
 		if err := s.store.add(i); err != nil {
-			return nil, errors.Wrapf(err, "add image %q", i)
+			return nil, errors.Wrapf(err, "add image %+v", i)
 		}
 	}
 	return s, nil
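The fake store fix swaps %q for %+v because the value being formatted is a struct, not a string. A small illustration of the difference (the Image fields here are invented for the example, not the store's real type):

```go
package main

import "fmt"

// Image is a stand-in for the store's image type.
type Image struct {
	ID       string
	RepoTags []string
	Size     int64
}

func main() {
	i := Image{ID: "sha256:abc", RepoTags: []string{"busybox:latest"}, Size: 1234}
	// %q is meant for strings; applied to a struct it quotes the string
	// fields and emits %!q(int64=...) noise for everything else.
	fmt.Printf("add image %q\n", i)
	// %+v prints field names with values, which is what the error message wants.
	fmt.Printf("add image %+v\n", i)
}
```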
vendor/github.com/containerd/cri/vendor.conf (generated, vendored; 26 changes)

@@ -1,15 +1,14 @@
 github.com/beorn7/perks 4c0e84591b9aa9e6dcfdf3e020114cd81f89d5f9
 github.com/blang/semver v3.1.0
-github.com/boltdb/bolt v1.3.1
 github.com/BurntSushi/toml a368813c5e648fee92e5f6c30e3944ff9d5e8895
 github.com/containerd/cgroups 5e610833b72089b37d0e615de9a92dfc043757c2
 github.com/containerd/console c12b1e7919c14469339a5d38f2f8ed9b64a9de23
-github.com/containerd/containerd 1950f791d9225ffe061c77e74e292bcb3c428a04
-github.com/containerd/continuity f44b615e492bdfb371aae2f76ec694d9da1db537
+github.com/containerd/containerd f88d3e5d6dfe9b7d7941ac5241649ad8240b9282
+github.com/containerd/continuity 7f53d412b9eb1cbf744c2063185d703a0ee34700
 github.com/containerd/fifo 3d5202aec260678c48179c56f40e6f38a095738c
 github.com/containerd/go-cni 6d7b509a054a3cb1c35ed1865d4fde2f0cb547cd
 github.com/containerd/go-runc 5a6d9f37cfa36b15efba46dc7ea349fa9b7143c3
-github.com/containerd/ttrpc 94dde388801693c54f88a6596f713b51a8b30b2d
+github.com/containerd/ttrpc 2a805f71863501300ae1976d29f0454ae003e85a
 github.com/containerd/typeurl a93fcdb778cd272c6e9b3028b2f42d813e785d40
 github.com/containernetworking/cni v0.6.0
 github.com/containernetworking/plugins v0.7.0
@@ -35,13 +34,13 @@ github.com/hashicorp/go-multierror ed905158d87462226a13fe39ddf685ea65f1c11f
 github.com/json-iterator/go 1.1.5
 github.com/matttproud/golang_protobuf_extensions v1.0.0
 github.com/Microsoft/go-winio v0.4.10
-github.com/Microsoft/hcsshim 44c060121b68e8bdc40b411beba551f3b4ee9e55
+github.com/Microsoft/hcsshim v0.7.4
 github.com/modern-go/concurrent 1.0.3
 github.com/modern-go/reflect2 1.0.1
 github.com/opencontainers/go-digest c9281466c8b2f606084ac71339773efd177436e7
 github.com/opencontainers/image-spec v1.0.1
-github.com/opencontainers/runc 20aff4f0488c6d4b8df4d85b4f63f1f704c11abd
-github.com/opencontainers/runtime-spec d810dbc60d8c5aeeb3d054bd1132fab2121968ce
+github.com/opencontainers/runc 00dc70017d222b178a002ed30e9321b12647af2d
+github.com/opencontainers/runtime-spec eba862dc2470385a233c7507392675cbeadf7353
 github.com/opencontainers/runtime-tools v0.6.0
 github.com/opencontainers/selinux b6fa367ed7f534f9ba25391cc2d467085dbb445a
 github.com/pkg/errors v0.8.0
@@ -59,6 +58,7 @@ github.com/urfave/cli 7bc6a0acffa589f415f88aca16cc1de5ffd66f9c
 github.com/xeipuuv/gojsonpointer 4e3ac2762d5f479393488629ee9370b50873b3a6
 github.com/xeipuuv/gojsonreference bd5ef7bd5415a7ac448318e64f11a24cd21e594b
 github.com/xeipuuv/gojsonschema 1d523034197ff1f222f6429836dd36a2457a1874
+go.etcd.io/bbolt v1.3.1-etcd.8
 golang.org/x/crypto 49796115aa4b964c318aad4f3084fdb41e9aa067
 golang.org/x/net b3756b4b77d7b13260a0a2ec658753cf48922eac
 golang.org/x/oauth2 a6bd8cefa1811bd24b86f8902872e4e8225f74c4
@@ -70,9 +70,9 @@ google.golang.org/genproto d80a6e20e776b0b17a324d0ba1ab50a39c8e8944
 google.golang.org/grpc v1.12.0
 gopkg.in/inf.v0 3887ee99ecf07df5b447e9b00d9c0b2adaa9f3e4
 gopkg.in/yaml.v2 53feefa2559fb8dfa8d81baad31be332c97d6c77
-k8s.io/api 012f271b5d41baad56190c5f1ae19bff16df0fd8
-k8s.io/apimachinery 6429050ef506887d121f3e7306e894f8900d8a63
-k8s.io/apiserver e9312c15296b6c2c923ebd5031ff5d1d5fd022d7
-k8s.io/client-go 37c3c02ec96533daec0dbda1f39a6b1d68505c79
-k8s.io/kubernetes v1.12.0-beta.1
-k8s.io/utils 982821ea41da7e7c15f3d3738921eb2e7e241ccd
+k8s.io/api kubernetes-1.12.0
+k8s.io/apimachinery kubernetes-1.12.0
+k8s.io/apiserver kubernetes-1.12.0
+k8s.io/client-go kubernetes-1.12.0
+k8s.io/kubernetes v1.12.0
+k8s.io/utils cd34563cd63c2bd7c6fe88a73c4dcf34ed8a67cb
vendor/k8s.io/api/core/v1/generated.pb.go (generated, vendored; 1615 changes)

File diff suppressed because it is too large.
vendor/k8s.io/api/core/v1/generated.proto (generated, vendored; 5 changes)

@@ -4503,7 +4503,10 @@ message TopologySelectorTerm {
 // TypedLocalObjectReference contains enough information to let you locate the
 // typed referenced object inside the same namespace.
 message TypedLocalObjectReference {
-  // APIGroup is the group for the resource being referenced
+  // APIGroup is the group for the resource being referenced.
+  // If APIGroup is not specified, the specified Kind must be in the core API group.
+  // For any other third-party types, APIGroup is required.
+  // +optional
   optional string apiGroup = 1;

   // Kind is the type of resource being referenced
vendor/k8s.io/api/core/v1/types.go (generated, vendored; 7 changes)

@@ -4495,8 +4495,11 @@ type LocalObjectReference struct {
 // TypedLocalObjectReference contains enough information to let you locate the
 // typed referenced object inside the same namespace.
 type TypedLocalObjectReference struct {
-	// APIGroup is the group for the resource being referenced
-	APIGroup string `json:"apiGroup" protobuf:"bytes,1,opt,name=apiGroup"`
+	// APIGroup is the group for the resource being referenced.
+	// If APIGroup is not specified, the specified Kind must be in the core API group.
+	// For any other third-party types, APIGroup is required.
+	// +optional
+	APIGroup *string `json:"apiGroup" protobuf:"bytes,1,opt,name=apiGroup"`
 	// Kind is the type of resource being referenced
 	Kind string `json:"kind" protobuf:"bytes,2,opt,name=kind"`
 	// Name is the name of resource being referenced
vendor/k8s.io/api/core/v1/types_swagger_doc_generated.go (generated, vendored; 2 changes)

@@ -2211,7 +2211,7 @@ func (TopologySelectorTerm) SwaggerDoc() map[string]string {

 var map_TypedLocalObjectReference = map[string]string{
 	"":         "TypedLocalObjectReference contains enough information to let you locate the typed referenced object inside the same namespace.",
-	"apiGroup": "APIGroup is the group for the resource being referenced",
+	"apiGroup": "APIGroup is the group for the resource being referenced. If APIGroup is not specified, the specified Kind must be in the core API group. For any other third-party types, APIGroup is required.",
 	"kind":     "Kind is the type of resource being referenced",
 	"name":     "Name is the name of resource being referenced",
 }
vendor/k8s.io/api/core/v1/zz_generated.deepcopy.go (generated, vendored; 7 changes)

@@ -2687,7 +2687,7 @@ func (in *PersistentVolumeClaimSpec) DeepCopyInto(out *PersistentVolumeClaimSpec
 	if in.DataSource != nil {
 		in, out := &in.DataSource, &out.DataSource
 		*out = new(TypedLocalObjectReference)
-		**out = **in
+		(*in).DeepCopyInto(*out)
 	}
 	return
 }
@@ -5090,6 +5090,11 @@ func (in *TopologySelectorTerm) DeepCopy() *TopologySelectorTerm {
 // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
 func (in *TypedLocalObjectReference) DeepCopyInto(out *TypedLocalObjectReference) {
 	*out = *in
+	if in.APIGroup != nil {
+		in, out := &in.APIGroup, &out.APIGroup
+		*out = new(string)
+		**out = **in
+	}
 	return
 }
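Switching APIGroup from string to *string is why the generated deep copy now allocates a fresh string: copying the struct by value would alias the pointer between the original and the copy. A minimal hand-written equivalent of the pattern (types simplified for illustration, not the generated k8s code):

```go
package deepcopysketch

// TypedLocalObjectReference mirrors the shape after this change: APIGroup
// is a *string, so a plain struct copy would share the pointer.
type TypedLocalObjectReference struct {
	APIGroup *string
	Kind     string
	Name     string
}

// DeepCopyInto allocates a new string for the pointer field; otherwise
// mutating the copy's APIGroup would also mutate the original.
func (in *TypedLocalObjectReference) DeepCopyInto(out *TypedLocalObjectReference) {
	*out = *in // copies Kind, Name, and the APIGroup pointer value
	if in.APIGroup != nil {
		out.APIGroup = new(string)
		*out.APIGroup = *in.APIGroup
	}
}
```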
vendor/k8s.io/client-go/rest/request.go (generated, vendored; 65 changes)

@@ -455,17 +455,9 @@ func (r *Request) URL() *url.URL {

 // finalURLTemplate is similar to URL(), but will make all specific parameter values equal
 // - instead of name or namespace, "{name}" and "{namespace}" will be used, and all query
-// parameters will be reset. This creates a copy of the request so as not to change the
-// underlying object. This means some useful request info (like the types of field
-// selectors in use) will be lost.
-// TODO: preserve field selector keys
+// parameters will be reset. This creates a copy of the url so as not to change the
+// underlying object.
 func (r Request) finalURLTemplate() url.URL {
 	if len(r.resourceName) != 0 {
 		r.resourceName = "{name}"
 	}
 	if r.namespaceSet && len(r.namespace) != 0 {
 		r.namespace = "{namespace}"
 	}
 	newParams := url.Values{}
 	v := []string{"{value}"}
 	for k := range r.params {
@@ -473,6 +465,59 @@ func (r Request) finalURLTemplate() url.URL {
 	}
 	r.params = newParams
 	url := r.URL()
+	segments := strings.Split(r.URL().Path, "/")
+	groupIndex := 0
+	index := 0
+	if r.URL() != nil && r.baseURL != nil && strings.Contains(r.URL().Path, r.baseURL.Path) {
+		groupIndex += len(strings.Split(r.baseURL.Path, "/"))
+	}
+	if groupIndex >= len(segments) {
+		return *url
+	}
+
+	const CoreGroupPrefix = "api"
+	const NamedGroupPrefix = "apis"
+	isCoreGroup := segments[groupIndex] == CoreGroupPrefix
+	isNamedGroup := segments[groupIndex] == NamedGroupPrefix
+	if isCoreGroup {
+		// checking the case of core group with /api/v1/... format
+		index = groupIndex + 2
+	} else if isNamedGroup {
+		// checking the case of named group with /apis/apps/v1/... format
+		index = groupIndex + 3
+	} else {
+		// this should not happen that the only two possibilities are /api... and /apis..., just want to put an
+		// outlet here in case more API groups are added in future if ever possible:
+		// https://kubernetes.io/docs/concepts/overview/kubernetes-api/#api-groups
+		// if a wrong API groups name is encountered, return the {prefix} for url.Path
+		url.Path = "/{prefix}"
+		url.RawQuery = ""
+		return *url
+	}
+	//switch segLength := len(segments) - index; segLength {
+	switch {
+	// case len(segments) - index == 1:
+	// resource (with no name) do nothing
+	case len(segments)-index == 2:
+		// /$RESOURCE/$NAME: replace $NAME with {name}
+		segments[index+1] = "{name}"
+	case len(segments)-index == 3:
+		if segments[index+2] == "finalize" || segments[index+2] == "status" {
+			// /$RESOURCE/$NAME/$SUBRESOURCE: replace $NAME with {name}
+			segments[index+1] = "{name}"
+		} else {
+			// /namespace/$NAMESPACE/$RESOURCE: replace $NAMESPACE with {namespace}
+			segments[index+1] = "{namespace}"
+		}
+	case len(segments)-index >= 4:
+		segments[index+1] = "{namespace}"
+		// /namespace/$NAMESPACE/$RESOURCE/$NAME: replace $NAMESPACE with {namespace}, $NAME with {name}
+		if segments[index+3] != "finalize" && segments[index+3] != "status" {
+			// /$RESOURCE/$NAME/$SUBRESOURCE: replace $NAME with {name}
+			segments[index+3] = "{name}"
+		}
+	}
+	url.Path = path.Join(segments...)
+	return *url
 }
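finalURLTemplate now rewrites the request path segment by segment so namespace and resource names collapse to {namespace} and {name}, keeping URL-keyed metrics at low cardinality. A self-contained sketch of that segment templating (simplified: no baseURL handling, and the package and function names here are made up):

```go
package main

import (
	"fmt"
	"path"
	"strings"
)

// templatePath collapses the variable parts of an apiserver request path.
// It distinguishes the two shapes the real code handles: /api/v1/... for
// the core group and /apis/GROUP/VERSION/... for named groups.
func templatePath(p string) string {
	segments := strings.Split(p, "/")
	index := 0
	switch {
	case len(segments) > 1 && segments[1] == "api":
		index = 3 // ["", "api", "v1", ...]
	case len(segments) > 1 && segments[1] == "apis":
		index = 4 // ["", "apis", "apps", "v1", ...]
	default:
		return "/{prefix}"
	}
	switch {
	case len(segments)-index == 2:
		segments[index+1] = "{name}"
	case len(segments)-index == 3:
		if segments[index+2] == "finalize" || segments[index+2] == "status" {
			segments[index+1] = "{name}"
		} else {
			segments[index+1] = "{namespace}"
		}
	case len(segments)-index >= 4:
		segments[index+1] = "{namespace}"
		if segments[index+3] != "finalize" && segments[index+3] != "status" {
			segments[index+3] = "{name}"
		}
	}
	return "/" + path.Join(segments...)
}

func main() {
	fmt.Println(templatePath("/api/v1/namespaces/default/pods/mypod"))
	// /api/v1/namespaces/{namespace}/pods/{name}
	fmt.Println(templatePath("/apis/apps/v1/namespaces/default/deployments/nginx"))
	// /apis/apps/v1/namespaces/{namespace}/deployments/{name}
}
```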
vendor/k8s.io/kubernetes/README.md (generated, vendored; 4 changes)

@@ -1,6 +1,6 @@
 # Kubernetes

-[![Submit Queue Widget]][Submit Queue] [![GoDoc Widget]][GoDoc] [](https://bestpractices.coreinfrastructure.org/projects/569)
+[![GoDoc Widget]][GoDoc] [](https://bestpractices.coreinfrastructure.org/projects/569)

 <img src="https://github.com/kubernetes/kubernetes/raw/master/logo/logo.png" width="100">

@@ -79,8 +79,6 @@ That said, if you have questions, reach out to us
 [interactive tutorial]: https://kubernetes.io/docs/tutorials/kubernetes-basics
 [kubernetes.io]: https://kubernetes.io
 [Scalable Microservices with Kubernetes]: https://www.udacity.com/course/scalable-microservices-with-kubernetes--ud615
-[Submit Queue]: https://submit-queue.k8s.io/#/ci
-[Submit Queue Widget]: https://submit-queue.k8s.io/health.svg?v=1
 [troubleshooting guide]: https://kubernetes.io/docs/tasks/debug-application-cluster/troubleshooting/

 []()
vendor/k8s.io/kubernetes/pkg/apis/core/types.go (generated, vendored; 7 changes)

@@ -3976,8 +3976,11 @@ type LocalObjectReference struct {

 // TypedLocalObjectReference contains enough information to let you locate the typed referenced object inside the same namespace.
 type TypedLocalObjectReference struct {
-	// APIGroup is the group for the resource being referenced
-	APIGroup string
+	// APIGroup is the group for the resource being referenced.
+	// If APIGroup is not specified, the specified Kind must be in the core API group.
+	// For any other third-party types, APIGroup is required.
+	// +optional
+	APIGroup *string
 	// Kind is the type of resource being referenced
 	Kind string
 	// Name is the name of resource being referenced
vendor/k8s.io/kubernetes/pkg/apis/core/zz_generated.deepcopy.go (generated, vendored; 7 changes)

@@ -2689,7 +2689,7 @@ func (in *PersistentVolumeClaimSpec) DeepCopyInto(out *PersistentVolumeClaimSpec
 	if in.DataSource != nil {
 		in, out := &in.DataSource, &out.DataSource
 		*out = new(TypedLocalObjectReference)
-		**out = **in
+		(*in).DeepCopyInto(*out)
 	}
 	return
 }
@@ -5075,6 +5075,11 @@ func (in *TopologySelectorTerm) DeepCopy() *TopologySelectorTerm {
 // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
 func (in *TypedLocalObjectReference) DeepCopyInto(out *TypedLocalObjectReference) {
 	*out = *in
+	if in.APIGroup != nil {
+		in, out := &in.APIGroup, &out.APIGroup
+		*out = new(string)
+		**out = **in
+	}
 	return
 }