vendor: update containerd/cri 61b7af7564

full diff: 92cb4ed978..61b7af7564

This adds a new dependency, github.com/fsnotify/fsnotify, which containerd/cri requires as of 4ce334aa49.

Signed-off-by: Kohei Tokunaga <ktokunaga.mail@gmail.com>
Author: ktock
Date: 2020-04-17 12:31:30 +09:00
Parent: aa6a66b65e
Commit: e79d666cdd
64 changed files with 5342 additions and 1867 deletions


@ -56,7 +56,7 @@ gotest.tools/v3 bb0d8a963040ea5048dcef1a14d8
github.com/cilium/ebpf 4032b1d8aae306b7bb94a2a11002932caf88c644
# cri dependencies
github.com/containerd/cri 92cb4ed9786a6cd271152ba1f862183d84701003 # master
github.com/containerd/cri 61b7af7564602234662562a9d37e772f5a54facb # master
github.com/davecgh/go-spew 8991bc29aa16c548c550c7ff78260e27b9ab7c73 # v1.1.1
github.com/docker/distribution 0d3efadf0154c2b8a4e7b6621fff9809655cc580
github.com/docker/docker 4634ce647cf2ce2c6031129ccd109e557244986f
@ -75,14 +75,14 @@ golang.org/x/oauth2 0f29369cfe4552d0e4bcddc57cc7
golang.org/x/time 9d24e82272b4f38b78bc8cff74fa936d31ccd8ef
gopkg.in/inf.v0 d2d2541c53f18d2a059457998ce2876cc8e67cbf # v0.9.1
gopkg.in/yaml.v2 53403b58ad1b561927d19068c655246f2db79d48 # v2.2.8
k8s.io/api e6bc7324d7efd1c8ab0e68dd8162a2b500b0ce3b # v0.18.0-beta.1
k8s.io/apimachinery 2373d029717c4d169463414a6127cd1d0d12680e # v0.18.0-beta.1
k8s.io/apiserver 9af6eefd238b73bb688c429c32d113d04eeea3a8 # v0.18.0-beta.1
k8s.io/client-go 33a99c0dca04e5e335442262f93df4c3faab201b # v0.18.0-beta.1
k8s.io/cri-api 3d1680d8d202aa12c5dc5689170c3c03a488d35b # v0.18.0-beta.1
k8s.io/api d2dce8e1788e4be2be3a62b6439b3eaa087df0df # v0.18.0
k8s.io/apimachinery 105e0c6d63f10531ed07f3b5a2195771a0fa444b # v0.18.0
k8s.io/apiserver 5c8e895629a454efd75a453d1dea5b8142db0013 # v0.18.0
k8s.io/client-go 0b19784585bd0a0ee5509855829ead81feaa2bdc # v0.18.0
k8s.io/cri-api 3d1680d8d202aa12c5dc5689170c3c03a488d35b # v0.18.0
k8s.io/klog 2ca9ad30301bf30a8a6e0fa2110db6b8df699a91 # v1.0.0
k8s.io/kubernetes f2d7577e31829664899f1b8e3d3a73de8c5f4029 # v1.18.0-beta.1
k8s.io/utils 5f6fbceb4c31d35291b2de756aeaae2ddeee3e92
k8s.io/kubernetes 9e991415386e4cf155a24b1da15becaa390438d8 # v1.18.0
k8s.io/utils a9aa75ae1b89e1b992c33383f48e942d97e52dae
sigs.k8s.io/structured-merge-diff/v3 877aee05330847a873a1a8998b40e12a1e0fde25 # v3.0.0
sigs.k8s.io/yaml 9fc95527decd95bb9d28cc2eab08179b2d0f6971 # v1.2.0
@ -90,6 +90,7 @@ sigs.k8s.io/yaml 9fc95527decd95bb9d28cc2eab08
github.com/containerd/go-cni 0d360c50b10b350b6bb23863fd4dfb1c232b01c9
github.com/containernetworking/cni 4cfb7b568922a3c79a23e438dc52fe537fc9687e # v0.7.1
github.com/containernetworking/plugins 9f96827c7cabb03f21d86326000c00f61e181f6a # v0.7.6
github.com/fsnotify/fsnotify 4bf2d1fec78374803a39307bfb8d340688f4f28e # v1.4.8
# image decrypt dependencies
github.com/containerd/imgcrypt 9e761ccd6069fb707ec9493435f31475b5524b38 # v1.0.1


@ -78,7 +78,7 @@ specifications as appropriate.
backport version of `libseccomp-dev` is required. See [travis.yml](.travis.yml) for an example on trusty.
* **btrfs development library.** Required by containerd btrfs support. `btrfs-tools`(Ubuntu, Debian) / `btrfs-progs-devel`(Fedora, CentOS, RHEL)
2. Install **`socat`** (required by portforward).
2. Install and setup a go 1.13.8 development environment. (Note: You can check the travis logs for a recent pull request to confirm the version(s) of golang currently being used to build and test master.)
2. Install and setup a go 1.13.9 development environment. (Note: You can check the travis logs for a recent pull request to confirm the version(s) of golang currently being used to build and test master.)
3. Make a local clone of this repository.
4. Install binary dependencies by running the following command from your cloned `cri/` project directory:
```bash
@ -102,7 +102,7 @@ testing purposes. The version tag carries the suffix "-TEST".*
To add build tags to the make option the `BUILD_TAGS` variable must be set.
```bash
make BUILD_TAGS='seccomp apparmor'
make BUILD_TAGS='seccomp apparmor selinux'
```
| Build Tag | Feature | Dependency |


@ -73,6 +73,11 @@ type ContainerdConfig struct {
// NoPivot disables pivot-root (linux only), required when running a container in a RamDisk with runc
// This only works for runtime type "io.containerd.runtime.v1.linux".
NoPivot bool `toml:"no_pivot" json:"noPivot"`
// DisableSnapshotAnnotations disables passing additional annotations (image
// related information) to snapshotters. These annotations are required by
// stargz snapshotter (https://github.com/containerd/stargz-snapshotter).
DisableSnapshotAnnotations bool `toml:"disable_snapshot_annotations" json:"disableSnapshotAnnotations"`
}
// CniConfig contains toml config related to cni
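For illustration only (not part of the diff): a minimal sketch of how the new `disable_snapshot_annotations` toml tag round-trips through the BurntSushi/toml decoder that containerd already vendors. The struct below is a trimmed stand-in for `ContainerdConfig`, not the real type, and the bare key/value stands in for wherever the CRI plugin section lives in a full config.toml.

```go
package main

import (
	"fmt"
	"log"

	"github.com/BurntSushi/toml"
)

// trimmedConfig stands in for ContainerdConfig and carries only the new field.
type trimmedConfig struct {
	DisableSnapshotAnnotations bool `toml:"disable_snapshot_annotations"`
}

func main() {
	// In a real config.toml this key would sit under the CRI plugin's containerd section.
	const data = `disable_snapshot_annotations = true`

	var cfg trimmedConfig
	if _, err := toml.Decode(data, &cfg); err != nil {
		log.Fatal(err)
	}
	fmt.Println(cfg.DisableSnapshotAnnotations) // true
}
```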


@ -0,0 +1,121 @@
/*
Copyright The Containerd Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package server
import (
"os"
"sync"
cni "github.com/containerd/go-cni"
"github.com/fsnotify/fsnotify"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
)
// cniNetConfSyncer is used to reload cni network conf triggered by fs change
// events.
type cniNetConfSyncer struct {
// only used for lastSyncStatus
sync.RWMutex
lastSyncStatus error
watcher *fsnotify.Watcher
confDir string
netPlugin cni.CNI
loadOpts []cni.CNIOpt
}
// newCNINetConfSyncer creates cni network conf syncer.
func newCNINetConfSyncer(confDir string, netPlugin cni.CNI, loadOpts []cni.CNIOpt) (*cniNetConfSyncer, error) {
watcher, err := fsnotify.NewWatcher()
if err != nil {
return nil, errors.Wrap(err, "failed to create fsnotify watcher")
}
if err := os.MkdirAll(confDir, 0700); err != nil {
return nil, errors.Wrapf(err, "failed to create cni conf dir=%s for watch", confDir)
}
if err := watcher.Add(confDir); err != nil {
return nil, errors.Wrapf(err, "failed to watch cni conf dir %s", confDir)
}
syncer := &cniNetConfSyncer{
watcher: watcher,
confDir: confDir,
netPlugin: netPlugin,
loadOpts: loadOpts,
}
if err := syncer.netPlugin.Load(syncer.loadOpts...); err != nil {
logrus.WithError(err).Error("failed to load cni during init, please check CRI plugin status before setting up network for pods")
syncer.updateLastStatus(err)
}
return syncer, nil
}
// syncLoop monitors any fs change events from cni conf dir and tries to reload
// cni configuration.
func (syncer *cniNetConfSyncer) syncLoop() error {
for {
select {
case event := <-syncer.watcher.Events:
// Only reload config when receiving write/rename/remove
// events
//
// TODO(fuweid): Might only reload target cni config
// files to prevent no-ops.
if event.Op&(fsnotify.Chmod|fsnotify.Create) > 0 {
logrus.Debugf("ignore event from cni conf dir: %s", event)
continue
}
logrus.Debugf("receiving change event from cni conf dir: %s", event)
lerr := syncer.netPlugin.Load(syncer.loadOpts...)
if lerr != nil {
logrus.WithError(lerr).
Errorf("failed to reload cni configuration after receiving fs change event(%s)", event)
}
syncer.updateLastStatus(lerr)
case err := <-syncer.watcher.Errors:
if err != nil {
logrus.WithError(err).Error("failed to continue sync cni conf change")
return err
}
}
}
}
// lastStatus retrieves last sync status.
func (syncer *cniNetConfSyncer) lastStatus() error {
syncer.RLock()
defer syncer.RUnlock()
return syncer.lastSyncStatus
}
// updateLastStatus will be called after every single cni load.
func (syncer *cniNetConfSyncer) updateLastStatus(err error) {
syncer.Lock()
defer syncer.Unlock()
syncer.lastSyncStatus = err
}
// stop stops watcher in the syncLoop.
func (syncer *cniNetConfSyncer) stop() error {
return syncer.watcher.Close()
}


@ -44,6 +44,8 @@ func (c *criService) Attach(ctx context.Context, r *runtime.AttachRequest) (*run
func (c *criService) attachContainer(ctx context.Context, id string, stdin io.Reader, stdout, stderr io.WriteCloser,
tty bool, resize <-chan remotecommand.TerminalSize) error {
ctx, cancel := context.WithCancel(ctx)
defer cancel()
// Get container from our container store.
cntr, err := c.containerStore.Get(id)
if err != nil {
@ -60,7 +62,7 @@ func (c *criService) attachContainer(ctx context.Context, id string, stdin io.Re
if err != nil {
return errors.Wrap(err, "failed to load task")
}
handleResizing(resize, func(size remotecommand.TerminalSize) {
handleResizing(ctx, resize, func(size remotecommand.TerminalSize) {
if err := task.Resize(ctx, uint32(size.Width), uint32(size.Height)); err != nil {
log.G(ctx).WithError(err).Errorf("Failed to resize task %q console", id)
}


@ -132,7 +132,7 @@ func (c *criService) execInternal(ctx context.Context, container containerd.Cont
return nil, errors.Wrapf(err, "failed to start exec %q", execID)
}
handleResizing(opts.resize, func(size remotecommand.TerminalSize) {
handleResizing(ctx, opts.resize, func(size remotecommand.TerminalSize) {
if err := process.Resize(ctx, uint32(size.Width), uint32(size.Height)); err != nil {
log.G(ctx).WithError(err).Errorf("Failed to resize process %q console for container %q", execID, id)
}


@ -28,10 +28,10 @@ import (
"github.com/containerd/containerd"
"github.com/containerd/containerd/containers"
"github.com/containerd/containerd/plugin"
"github.com/containerd/containerd/reference/docker"
"github.com/containerd/containerd/runtime/linux/runctypes"
runcoptions "github.com/containerd/containerd/runtime/v2/runc/options"
"github.com/containerd/typeurl"
"github.com/docker/distribution/reference"
imagedigest "github.com/opencontainers/go-digest"
"github.com/pkg/errors"
"golang.org/x/net/context"
@ -146,12 +146,12 @@ func criContainerStateToString(state runtime.ContainerState) string {
}
// getRepoDigestAndTag returns image repoDigest and repoTag of the named image reference.
func getRepoDigestAndTag(namedRef reference.Named, digest imagedigest.Digest, schema1 bool) (string, string) {
func getRepoDigestAndTag(namedRef docker.Named, digest imagedigest.Digest, schema1 bool) (string, string) {
var repoTag, repoDigest string
if _, ok := namedRef.(reference.NamedTagged); ok {
if _, ok := namedRef.(docker.NamedTagged); ok {
repoTag = namedRef.String()
}
if _, ok := namedRef.(reference.Canonical); ok {
if _, ok := namedRef.(docker.Canonical); ok {
repoDigest = namedRef.String()
} else if !schema1 {
// digest is not actual repo digest for schema1 image.
@ -170,7 +170,7 @@ func (c *criService) localResolve(refOrID string) (imagestore.Image, error) {
return func(ref string) string {
// ref is not image id, try to resolve it locally.
// TODO(random-liu): Handle this error better for debugging.
normalized, err := reference.ParseDockerRef(ref)
normalized, err := docker.ParseDockerRef(ref)
if err != nil {
return ""
}
@ -283,13 +283,13 @@ func toRuntimeAuthConfig(a criconfig.AuthConfig) *runtime.AuthConfig {
func parseImageReferences(refs []string) ([]string, []string) {
var tags, digests []string
for _, ref := range refs {
parsed, err := reference.ParseAnyReference(ref)
parsed, err := docker.ParseAnyReference(ref)
if err != nil {
continue
}
if _, ok := parsed.(reference.Canonical); ok {
if _, ok := parsed.(docker.Canonical); ok {
digests = append(digests, parsed.String())
} else if _, ok := parsed.(reference.Tagged); ok {
} else if _, ok := parsed.(docker.Tagged); ok {
tags = append(tags, parsed.String())
}
}


@ -31,10 +31,10 @@ import (
"github.com/containerd/containerd/errdefs"
containerdimages "github.com/containerd/containerd/images"
"github.com/containerd/containerd/log"
distribution "github.com/containerd/containerd/reference/docker"
"github.com/containerd/containerd/remotes/docker"
"github.com/containerd/imgcrypt"
"github.com/containerd/imgcrypt/images/encryption"
distribution "github.com/docker/distribution/reference"
imagespec "github.com/opencontainers/image-spec/specs-go/v1"
"github.com/pkg/errors"
"golang.org/x/net/context"
@ -120,6 +120,10 @@ func (c *criService) PullImage(ctx context.Context, r *runtime.PullImageRequest)
}
pullOpts = append(pullOpts, c.encryptedImagesPullOpts()...)
if !c.config.ContainerdConfig.DisableSnapshotAnnotations {
pullOpts = append(pullOpts,
containerd.WithImageHandlerWrapper(appendInfoHandlerWrapper(ref)))
}
image, err := c.client.Pull(ctx, ref, pullOpts...)
if err != nil {
@ -422,3 +426,42 @@ func (c *criService) encryptedImagesPullOpts() []containerd.RemoteOpt {
}
return nil
}
const (
// targetRefLabel is a label which contains image reference and will be passed
// to snapshotters.
targetRefLabel = "containerd.io/snapshot/cri.image-ref"
// targetDigestLabel is a label which contains layer digest and will be passed
// to snapshotters.
targetDigestLabel = "containerd.io/snapshot/cri.layer-digest"
)
// appendInfoHandlerWrapper makes a handler which appends some basic information
// of images to each layer descriptor as annotations during unpack. These
// annotations will be passed to snapshotters as labels. These labels will be
// used mainly by stargz-based snapshotters for querying image contents from the
// registry.
func appendInfoHandlerWrapper(ref string) func(f containerdimages.Handler) containerdimages.Handler {
return func(f containerdimages.Handler) containerdimages.Handler {
return containerdimages.HandlerFunc(func(ctx context.Context, desc imagespec.Descriptor) ([]imagespec.Descriptor, error) {
children, err := f.Handle(ctx, desc)
if err != nil {
return nil, err
}
switch desc.MediaType {
case imagespec.MediaTypeImageManifest, containerdimages.MediaTypeDockerSchema2Manifest:
for i := range children {
c := &children[i]
if containerdimages.IsLayerType(c.MediaType) {
if c.Annotations == nil {
c.Annotations = make(map[string]string)
}
c.Annotations[targetRefLabel] = ref
c.Annotations[targetDigestLabel] = c.Digest.String()
}
}
}
return children, nil
})
}
}


@ -91,6 +91,9 @@ type criService struct {
// initialized indicates whether the server is initialized. All GRPC services
// should return error before the server is initialized.
initialized atomic.Bool
// cniNetConfMonitor is used to reload cni network conf if there is
// any valid fs change events from cni network conf dir.
cniNetConfMonitor *cniNetConfSyncer
}
// NewCRIService returns a new instance of CRIService
@ -128,6 +131,11 @@ func NewCRIService(config criconfig.Config, client *containerd.Client) (CRIServi
c.eventMonitor = newEventMonitor(c)
c.cniNetConfMonitor, err = newCNINetConfSyncer(c.config.NetworkPluginConfDir, c.netPlugin, c.cniLoadOptions())
if err != nil {
return nil, errors.Wrap(err, "failed to create cni conf monitor")
}
return c, nil
}
@ -169,6 +177,14 @@ func (c *criService) Run() error {
)
snapshotsSyncer.start()
// Start CNI network conf syncer
logrus.Info("Start cni network conf syncer")
cniNetConfMonitorErrCh := make(chan error, 1)
go func() {
defer close(cniNetConfMonitorErrCh)
cniNetConfMonitorErrCh <- c.cniNetConfMonitor.syncLoop()
}()
// Start streaming server.
logrus.Info("Start streaming server")
streamServerErrCh := make(chan error)
@ -183,11 +199,12 @@ func (c *criService) Run() error {
// Set the server as initialized. GRPC services could start serving traffic.
c.initialized.Set()
var eventMonitorErr, streamServerErr error
var eventMonitorErr, streamServerErr, cniNetConfMonitorErr error
// Stop the whole CRI service if any of the critical service exits.
select {
case eventMonitorErr = <-eventMonitorErrCh:
case streamServerErr = <-streamServerErrCh:
case cniNetConfMonitorErr = <-cniNetConfMonitorErrCh:
}
if err := c.Close(); err != nil {
return errors.Wrap(err, "failed to stop cri service")
@ -222,6 +239,9 @@ func (c *criService) Run() error {
if streamServerErr != nil {
return errors.Wrap(streamServerErr, "stream server error")
}
if cniNetConfMonitorErr != nil {
return errors.Wrap(cniNetConfMonitorErr, "cni network conf monitor error")
}
return nil
}
@ -229,6 +249,9 @@ func (c *criService) Run() error {
// TODO(random-liu): Make close synchronous.
func (c *criService) Close() error {
logrus.Info("Stop CRI service")
if err := c.cniNetConfMonitor.stop(); err != nil {
logrus.WithError(err).Error("failed to stop cni network conf monitor")
}
c.eventMonitor.stop()
if err := c.streamServer.Stop(); err != nil {
return errors.Wrap(err, "failed to stop stream server")


@ -60,11 +60,6 @@ func (c *criService) initPlatform() error {
return errors.Wrap(err, "failed to initialize cni")
}
// Try to load the config if it exists. Just log the error if load fails
// This is not disruptive for containerd to panic
if err := c.netPlugin.Load(c.cniLoadOptions()...); err != nil {
logrus.WithError(err).Error("Failed to load cni during init, please check CRI plugin status before setting up network for pods")
}
return nil
}


@ -21,7 +21,6 @@ package server
import (
cni "github.com/containerd/go-cni"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
)
// windowsNetworkAttachCount is the minimum number of networks the PodSandbox
@ -44,11 +43,6 @@ func (c *criService) initPlatform() error {
return errors.Wrap(err, "failed to initialize cni")
}
// Try to load the config if it exists. Just log the error if load fails
// This is not disruptive for containerd to panic
if err := c.netPlugin.Load(c.cniLoadOptions()...); err != nil {
logrus.WithError(err).Error("Failed to load cni during init, please check CRI plugin status before setting up network for pods")
}
return nil
}


@ -41,10 +41,6 @@ func (c *criService) Status(ctx context.Context, r *runtime.StatusRequest) (*run
Type: runtime.NetworkReady,
Status: true,
}
// Load the latest cni configuration to be in sync with the latest network configuration
if err := c.netPlugin.Load(c.cniLoadOptions()...); err != nil {
log.G(ctx).WithError(err).Errorf("Failed to load cni configuration")
}
// Check the status of the cni initialization
if err := c.netPlugin.Status(); err != nil {
networkCondition.Status = false
@ -76,6 +72,12 @@ func (c *criService) Status(ctx context.Context, r *runtime.StatusRequest) (*run
log.G(ctx).WithError(err).Errorf("Failed to marshal CNI config %v", err)
}
resp.Info["cniconfig"] = string(cniConfig)
lastCNILoadStatus := "OK"
if lerr := c.cniNetConfMonitor.lastStatus(); lerr != nil {
lastCNILoadStatus = lerr.Error()
}
resp.Info["lastCNILoadStatus"] = lastCNILoadStatus
}
return resp, nil
}


@ -17,6 +17,7 @@ limitations under the License.
package server
import (
"context"
"crypto/tls"
"io"
"math"
@ -160,9 +161,8 @@ func (s *streamRuntime) PortForward(podSandboxID string, port int32, stream io.R
}
// handleResizing spawns a goroutine that processes the resize channel, calling resizeFunc for each
// remotecommand.TerminalSize received from the channel. The resize channel must be closed elsewhere to stop the
// goroutine.
func handleResizing(resize <-chan remotecommand.TerminalSize, resizeFunc func(size remotecommand.TerminalSize)) {
// remotecommand.TerminalSize received from the channel.
func handleResizing(ctx context.Context, resize <-chan remotecommand.TerminalSize, resizeFunc func(size remotecommand.TerminalSize)) {
if resize == nil {
return
}
@ -171,7 +171,10 @@ func handleResizing(resize <-chan remotecommand.TerminalSize, resizeFunc func(si
defer runtime.HandleCrash()
for {
size, ok := <-resize
select {
case <-ctx.Done():
return
case size, ok := <-resize:
if !ok {
return
}
@ -180,6 +183,7 @@ func handleResizing(resize <-chan remotecommand.TerminalSize, resizeFunc func(si
}
resizeFunc(size)
}
}
}()
}


@ -17,7 +17,7 @@ limitations under the License.
package util
import (
"github.com/docker/distribution/reference"
"github.com/containerd/containerd/reference/docker"
)
// NormalizeImageRef normalizes the image reference following the docker convention. This is added
@ -27,7 +27,7 @@ import (
// sha256:7cc4b5aefd1d0cadf8d97d4350462ba51c694ebca145b08d7d41b41acc8db5aa will be returned as
// docker.io/library/busybox@sha256:7cc4b5aefd1d0cadf8d97d4350462ba51c694ebca145b08d7d41b41acc8db5aa.
//
// Deprecated: use github.com/docker/reference.ParseDockerRef() instead
func NormalizeImageRef(ref string) (reference.Named, error) {
return reference.ParseDockerRef(ref)
// Deprecated: use github.com/containerd/containerd/reference/docker.ParseDockerRef() instead
func NormalizeImageRef(ref string) (docker.Named, error) {
return docker.ParseDockerRef(ref)
}
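As a hedged illustration (not part of the diff), the behaviour the comment above describes, exercised through the `github.com/containerd/containerd/reference/docker` package this file now imports; the busybox digest is the one already used in the comment:

```go
package main

import (
	"fmt"
	"log"

	"github.com/containerd/containerd/reference/docker"
)

func main() {
	// A reference carrying both a tag and a digest; ParseDockerRef keeps only the digest.
	ref := "docker.io/library/busybox:latest@sha256:7cc4b5aefd1d0cadf8d97d4350462ba51c694ebca145b08d7d41b41acc8db5aa"

	named, err := docker.ParseDockerRef(ref)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(named.String())
	// docker.io/library/busybox@sha256:7cc4b5aefd1d0cadf8d97d4350462ba51c694ebca145b08d7d41b41acc8db5aa
}
```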


@ -1,7 +1,7 @@
# cri dependencies
github.com/docker/distribution 0d3efadf0154c2b8a4e7b6621fff9809655cc580
github.com/docker/docker 4634ce647cf2ce2c6031129ccd109e557244986f
github.com/opencontainers/selinux 31f70552238c5e017d78c3f1ba65e85f593f48e0 # v1.3.3
github.com/opencontainers/selinux 0d49ba2a6aae052c614dfe5de62a158711a6c461 # v1.5.1
github.com/tchap/go-patricia 666120de432aea38ab06bd5c818f04f4129882c9 # v2.2.6
# containerd dependencies
@ -9,13 +9,13 @@ github.com/beorn7/perks 37c8de3658fcb183f997c4e13e83
github.com/BurntSushi/toml 3012a1dbe2e4bd1391d42b32f0577cb7bbc7f005 # v0.3.1
github.com/cespare/xxhash/v2 d7df74196a9e781ede915320c11c378c1b2f3a1f # v2.1.1
github.com/containerd/cgroups 7347743e5d1e8500d9f27c8e748e689ed991d92b
github.com/containerd/console 8375c3424e4d7b114e8a90a4a40c8e1b40d1d4e6
github.com/containerd/console 8375c3424e4d7b114e8a90a4a40c8e1b40d1d4e6 # v1.0.0
github.com/containerd/containerd 01310155947cb6eec37dcae29742a165e56acb4a
github.com/containerd/continuity 0ec596719c75bfd42908850990acea594b7593ac
github.com/containerd/fifo bda0ff6ed73c67bfb5e62bc9c697f146b7fd7f13
github.com/containerd/go-runc a5c2862aed5e6358b305b0e16bfce58e0549b1cd
github.com/containerd/ttrpc 92c8520ef9f86600c650dd540266a007bf03670f
github.com/containerd/typeurl a93fcdb778cd272c6e9b3028b2f42d813e785d40
github.com/containerd/ttrpc 92c8520ef9f86600c650dd540266a007bf03670f # v1.0.0
github.com/containerd/typeurl a93fcdb778cd272c6e9b3028b2f42d813e785d40 # v1.0.0
github.com/coreos/go-systemd/v22 2d78030078ef61b3cae27f42ad6d0e46db51b339 # v22.0.0
github.com/cpuguy83/go-md2man 7762f7e404f8416dfa1d9bb6a8c192aa9acb4d19 # v1.0.10
github.com/docker/go-events 9461782956ad83b30282bf90e31fa6a70c255ba9
@ -50,7 +50,7 @@ go.etcd.io/bbolt a0458a2b35708eef59eb5f620ceb
go.opencensus.io 9c377598961b706d1542bd2d84d538b5094d596e # v0.22.0
golang.org/x/net f3200d17e092c607f615320ecaad13d87ad9a2b3
golang.org/x/sync 42b317875d0fa942474b76e1b46a6060d720ae6e
golang.org/x/sys 52ab431487773bc9dd1b0766228b1cf3944126bf
golang.org/x/sys 5c8b2ff67527cb88b770f693cebf3799036d8bc0
golang.org/x/text 19e51611da83d6be54ddafce4a4af510cb3e9ea4
google.golang.org/genproto e50cd9704f63023d62cd06a1994b98227fc4d21a
google.golang.org/grpc f495f5b15ae7ccda3b38c53a1bfcde4c1a58a2bc # v1.27.1
@ -74,14 +74,14 @@ golang.org/x/oauth2 0f29369cfe4552d0e4bcddc57cc7
golang.org/x/time 9d24e82272b4f38b78bc8cff74fa936d31ccd8ef
gopkg.in/inf.v0 d2d2541c53f18d2a059457998ce2876cc8e67cbf # v0.9.1
gopkg.in/yaml.v2 53403b58ad1b561927d19068c655246f2db79d48 # v2.2.8
k8s.io/api e6bc7324d7efd1c8ab0e68dd8162a2b500b0ce3b # v0.18.0-beta.1
k8s.io/apimachinery 2373d029717c4d169463414a6127cd1d0d12680e # v0.18.0-beta.1
k8s.io/apiserver 9af6eefd238b73bb688c429c32d113d04eeea3a8 # v0.18.0-beta.1
k8s.io/client-go 33a99c0dca04e5e335442262f93df4c3faab201b # v0.18.0-beta.1
k8s.io/cri-api 3d1680d8d202aa12c5dc5689170c3c03a488d35b # v0.18.0-beta.1
k8s.io/api d2dce8e1788e4be2be3a62b6439b3eaa087df0df # v0.18.0
k8s.io/apimachinery 105e0c6d63f10531ed07f3b5a2195771a0fa444b # v0.18.0
k8s.io/apiserver 5c8e895629a454efd75a453d1dea5b8142db0013 # v0.18.0
k8s.io/client-go 0b19784585bd0a0ee5509855829ead81feaa2bdc # v0.18.0
k8s.io/cri-api 3d1680d8d202aa12c5dc5689170c3c03a488d35b # v0.18.0
k8s.io/klog 2ca9ad30301bf30a8a6e0fa2110db6b8df699a91 # v1.0.0
k8s.io/kubernetes f2d7577e31829664899f1b8e3d3a73de8c5f4029 # v1.18.0-beta.1
k8s.io/utils 5f6fbceb4c31d35291b2de756aeaae2ddeee3e92
k8s.io/kubernetes 9e991415386e4cf155a24b1da15becaa390438d8 # v1.18.0
k8s.io/utils a9aa75ae1b89e1b992c33383f48e942d97e52dae
sigs.k8s.io/structured-merge-diff/v3 877aee05330847a873a1a8998b40e12a1e0fde25 # v3.0.0
sigs.k8s.io/yaml 9fc95527decd95bb9d28cc2eab08179b2d0f6971 # v1.2.0
@ -89,6 +89,7 @@ sigs.k8s.io/yaml 9fc95527decd95bb9d28cc2eab08
github.com/containerd/go-cni 0d360c50b10b350b6bb23863fd4dfb1c232b01c9
github.com/containernetworking/cni 4cfb7b568922a3c79a23e438dc52fe537fc9687e # v0.7.1
github.com/containernetworking/plugins 9f96827c7cabb03f21d86326000c00f61e181f6a # v0.7.6
github.com/fsnotify/fsnotify 4bf2d1fec78374803a39307bfb8d340688f4f28e # v1.4.8
# image decrypt dependencies
github.com/containerd/imgcrypt 9e761ccd6069fb707ec9493435f31475b5524b38 # v1.0.1


@ -1,42 +0,0 @@
package reference
import "path"
// IsNameOnly returns true if reference only contains a repo name.
func IsNameOnly(ref Named) bool {
if _, ok := ref.(NamedTagged); ok {
return false
}
if _, ok := ref.(Canonical); ok {
return false
}
return true
}
// FamiliarName returns the familiar name string
// for the given named, familiarizing if needed.
func FamiliarName(ref Named) string {
if nn, ok := ref.(normalizedNamed); ok {
return nn.Familiar().Name()
}
return ref.Name()
}
// FamiliarString returns the familiar string representation
// for the given reference, familiarizing if needed.
func FamiliarString(ref Reference) string {
if nn, ok := ref.(normalizedNamed); ok {
return nn.Familiar().String()
}
return ref.String()
}
// FamiliarMatch reports whether ref matches the specified pattern.
// See https://godoc.org/path#Match for supported patterns.
func FamiliarMatch(pattern string, ref Reference) (bool, error) {
matched, err := path.Match(pattern, FamiliarString(ref))
if namedRef, isNamed := ref.(Named); isNamed && !matched {
matched, _ = path.Match(pattern, FamiliarName(namedRef))
}
return matched, err
}


@ -1,199 +0,0 @@
package reference
import (
"errors"
"fmt"
"strings"
"github.com/docker/distribution/digestset"
"github.com/opencontainers/go-digest"
)
var (
legacyDefaultDomain = "index.docker.io"
defaultDomain = "docker.io"
officialRepoName = "library"
defaultTag = "latest"
)
// normalizedNamed represents a name which has been
// normalized and has a familiar form. A familiar name
// is what is used in Docker UI. An example normalized
// name is "docker.io/library/ubuntu" and corresponding
// familiar name of "ubuntu".
type normalizedNamed interface {
Named
Familiar() Named
}
// ParseNormalizedNamed parses a string into a named reference
// transforming a familiar name from Docker UI to a fully
// qualified reference. If the value may be an identifier
// use ParseAnyReference.
func ParseNormalizedNamed(s string) (Named, error) {
if ok := anchoredIdentifierRegexp.MatchString(s); ok {
return nil, fmt.Errorf("invalid repository name (%s), cannot specify 64-byte hexadecimal strings", s)
}
domain, remainder := splitDockerDomain(s)
var remoteName string
if tagSep := strings.IndexRune(remainder, ':'); tagSep > -1 {
remoteName = remainder[:tagSep]
} else {
remoteName = remainder
}
if strings.ToLower(remoteName) != remoteName {
return nil, errors.New("invalid reference format: repository name must be lowercase")
}
ref, err := Parse(domain + "/" + remainder)
if err != nil {
return nil, err
}
named, isNamed := ref.(Named)
if !isNamed {
return nil, fmt.Errorf("reference %s has no name", ref.String())
}
return named, nil
}
// ParseDockerRef normalizes the image reference following the docker convention. This is added
// mainly for backward compatibility.
// The reference returned can only be either tagged or digested. For reference contains both tag
// and digest, the function returns digested reference, e.g. docker.io/library/busybox:latest@
// sha256:7cc4b5aefd1d0cadf8d97d4350462ba51c694ebca145b08d7d41b41acc8db5aa will be returned as
// docker.io/library/busybox@sha256:7cc4b5aefd1d0cadf8d97d4350462ba51c694ebca145b08d7d41b41acc8db5aa.
func ParseDockerRef(ref string) (Named, error) {
named, err := ParseNormalizedNamed(ref)
if err != nil {
return nil, err
}
if _, ok := named.(NamedTagged); ok {
if canonical, ok := named.(Canonical); ok {
// The reference is both tagged and digested, only
// return digested.
newNamed, err := WithName(canonical.Name())
if err != nil {
return nil, err
}
newCanonical, err := WithDigest(newNamed, canonical.Digest())
if err != nil {
return nil, err
}
return newCanonical, nil
}
}
return TagNameOnly(named), nil
}
// splitDockerDomain splits a repository name to domain and remotename string.
// If no valid domain is found, the default domain is used. Repository name
// needs to be already validated before.
func splitDockerDomain(name string) (domain, remainder string) {
i := strings.IndexRune(name, '/')
if i == -1 || (!strings.ContainsAny(name[:i], ".:") && name[:i] != "localhost") {
domain, remainder = defaultDomain, name
} else {
domain, remainder = name[:i], name[i+1:]
}
if domain == legacyDefaultDomain {
domain = defaultDomain
}
if domain == defaultDomain && !strings.ContainsRune(remainder, '/') {
remainder = officialRepoName + "/" + remainder
}
return
}
// familiarizeName returns a shortened version of the name familiar
// to the Docker UI. Familiar names have the default domain
// "docker.io" and "library/" repository prefix removed.
// For example, "docker.io/library/redis" will have the familiar
// name "redis" and "docker.io/dmcgowan/myapp" will be "dmcgowan/myapp".
// Returns a familiarized named only reference.
func familiarizeName(named namedRepository) repository {
repo := repository{
domain: named.Domain(),
path: named.Path(),
}
if repo.domain == defaultDomain {
repo.domain = ""
// Handle official repositories which have the pattern "library/<official repo name>"
if split := strings.Split(repo.path, "/"); len(split) == 2 && split[0] == officialRepoName {
repo.path = split[1]
}
}
return repo
}
func (r reference) Familiar() Named {
return reference{
namedRepository: familiarizeName(r.namedRepository),
tag: r.tag,
digest: r.digest,
}
}
func (r repository) Familiar() Named {
return familiarizeName(r)
}
func (t taggedReference) Familiar() Named {
return taggedReference{
namedRepository: familiarizeName(t.namedRepository),
tag: t.tag,
}
}
func (c canonicalReference) Familiar() Named {
return canonicalReference{
namedRepository: familiarizeName(c.namedRepository),
digest: c.digest,
}
}
// TagNameOnly adds the default tag "latest" to a reference if it only has
// a repo name.
func TagNameOnly(ref Named) Named {
if IsNameOnly(ref) {
namedTagged, err := WithTag(ref, defaultTag)
if err != nil {
// Default tag must be valid, to create a NamedTagged
// type with non-validated input the WithTag function
// should be used instead
panic(err)
}
return namedTagged
}
return ref
}
// ParseAnyReference parses a reference string as a possible identifier,
// full digest, or familiar name.
func ParseAnyReference(ref string) (Reference, error) {
if ok := anchoredIdentifierRegexp.MatchString(ref); ok {
return digestReference("sha256:" + ref), nil
}
if dgst, err := digest.Parse(ref); err == nil {
return digestReference(dgst), nil
}
return ParseNormalizedNamed(ref)
}
// ParseAnyReferenceWithSet parses a reference string as a possible short
// identifier to be matched in a digest set, a full digest, or familiar name.
func ParseAnyReferenceWithSet(ref string, ds *digestset.Set) (Reference, error) {
if ok := anchoredShortIdentifierRegexp.MatchString(ref); ok {
dgst, err := ds.Lookup(ref)
if err == nil {
return digestReference(dgst), nil
}
} else {
if dgst, err := digest.Parse(ref); err == nil {
return digestReference(dgst), nil
}
}
return ParseNormalizedNamed(ref)
}


@ -1,433 +0,0 @@
// Package reference provides a general type to represent any way of referencing images within the registry.
// Its main purpose is to abstract tags and digests (content-addressable hash).
//
// Grammar
//
// reference := name [ ":" tag ] [ "@" digest ]
// name := [domain '/'] path-component ['/' path-component]*
// domain := domain-component ['.' domain-component]* [':' port-number]
// domain-component := /([a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9-]*[a-zA-Z0-9])/
// port-number := /[0-9]+/
// path-component := alpha-numeric [separator alpha-numeric]*
// alpha-numeric := /[a-z0-9]+/
// separator := /[_.]|__|[-]*/
//
// tag := /[\w][\w.-]{0,127}/
//
// digest := digest-algorithm ":" digest-hex
// digest-algorithm := digest-algorithm-component [ digest-algorithm-separator digest-algorithm-component ]*
// digest-algorithm-separator := /[+.-_]/
// digest-algorithm-component := /[A-Za-z][A-Za-z0-9]*/
// digest-hex := /[0-9a-fA-F]{32,}/ ; At least 128 bit digest value
//
// identifier := /[a-f0-9]{64}/
// short-identifier := /[a-f0-9]{6,64}/
package reference
import (
"errors"
"fmt"
"strings"
"github.com/opencontainers/go-digest"
)
const (
// NameTotalLengthMax is the maximum total number of characters in a repository name.
NameTotalLengthMax = 255
)
var (
// ErrReferenceInvalidFormat represents an error while trying to parse a string as a reference.
ErrReferenceInvalidFormat = errors.New("invalid reference format")
// ErrTagInvalidFormat represents an error while trying to parse a string as a tag.
ErrTagInvalidFormat = errors.New("invalid tag format")
// ErrDigestInvalidFormat represents an error while trying to parse a string as a tag.
ErrDigestInvalidFormat = errors.New("invalid digest format")
// ErrNameContainsUppercase is returned for invalid repository names that contain uppercase characters.
ErrNameContainsUppercase = errors.New("repository name must be lowercase")
// ErrNameEmpty is returned for empty, invalid repository names.
ErrNameEmpty = errors.New("repository name must have at least one component")
// ErrNameTooLong is returned when a repository name is longer than NameTotalLengthMax.
ErrNameTooLong = fmt.Errorf("repository name must not be more than %v characters", NameTotalLengthMax)
// ErrNameNotCanonical is returned when a name is not canonical.
ErrNameNotCanonical = errors.New("repository name must be canonical")
)
// Reference is an opaque object reference identifier that may include
// modifiers such as a hostname, name, tag, and digest.
type Reference interface {
// String returns the full reference
String() string
}
// Field provides a wrapper type for resolving correct reference types when
// working with encoding.
type Field struct {
reference Reference
}
// AsField wraps a reference in a Field for encoding.
func AsField(reference Reference) Field {
return Field{reference}
}
// Reference unwraps the reference type from the field to
// return the Reference object. This object should be
// of the appropriate type to further check for different
// reference types.
func (f Field) Reference() Reference {
return f.reference
}
// MarshalText serializes the field to byte text which
// is the string of the reference.
func (f Field) MarshalText() (p []byte, err error) {
return []byte(f.reference.String()), nil
}
// UnmarshalText parses text bytes by invoking the
// reference parser to ensure the appropriately
// typed reference object is wrapped by field.
func (f *Field) UnmarshalText(p []byte) error {
r, err := Parse(string(p))
if err != nil {
return err
}
f.reference = r
return nil
}
// Named is an object with a full name
type Named interface {
Reference
Name() string
}
// Tagged is an object which has a tag
type Tagged interface {
Reference
Tag() string
}
// NamedTagged is an object including a name and tag.
type NamedTagged interface {
Named
Tag() string
}
// Digested is an object which has a digest
// in which it can be referenced by
type Digested interface {
Reference
Digest() digest.Digest
}
// Canonical reference is an object with a fully unique
// name including a name with domain and digest
type Canonical interface {
Named
Digest() digest.Digest
}
// namedRepository is a reference to a repository with a name.
// A namedRepository has both domain and path components.
type namedRepository interface {
Named
Domain() string
Path() string
}
// Domain returns the domain part of the Named reference
func Domain(named Named) string {
if r, ok := named.(namedRepository); ok {
return r.Domain()
}
domain, _ := splitDomain(named.Name())
return domain
}
// Path returns the name without the domain part of the Named reference
func Path(named Named) (name string) {
if r, ok := named.(namedRepository); ok {
return r.Path()
}
_, path := splitDomain(named.Name())
return path
}
func splitDomain(name string) (string, string) {
match := anchoredNameRegexp.FindStringSubmatch(name)
if len(match) != 3 {
return "", name
}
return match[1], match[2]
}
// SplitHostname splits a named reference into a
// hostname and name string. If no valid hostname is
// found, the hostname is empty and the full value
// is returned as name
// DEPRECATED: Use Domain or Path
func SplitHostname(named Named) (string, string) {
if r, ok := named.(namedRepository); ok {
return r.Domain(), r.Path()
}
return splitDomain(named.Name())
}
// Parse parses s and returns a syntactically valid Reference.
// If an error was encountered it is returned, along with a nil Reference.
// NOTE: Parse will not handle short digests.
func Parse(s string) (Reference, error) {
matches := ReferenceRegexp.FindStringSubmatch(s)
if matches == nil {
if s == "" {
return nil, ErrNameEmpty
}
if ReferenceRegexp.FindStringSubmatch(strings.ToLower(s)) != nil {
return nil, ErrNameContainsUppercase
}
return nil, ErrReferenceInvalidFormat
}
if len(matches[1]) > NameTotalLengthMax {
return nil, ErrNameTooLong
}
var repo repository
nameMatch := anchoredNameRegexp.FindStringSubmatch(matches[1])
if len(nameMatch) == 3 {
repo.domain = nameMatch[1]
repo.path = nameMatch[2]
} else {
repo.domain = ""
repo.path = matches[1]
}
ref := reference{
namedRepository: repo,
tag: matches[2],
}
if matches[3] != "" {
var err error
ref.digest, err = digest.Parse(matches[3])
if err != nil {
return nil, err
}
}
r := getBestReferenceType(ref)
if r == nil {
return nil, ErrNameEmpty
}
return r, nil
}
// ParseNamed parses s and returns a syntactically valid reference implementing
// the Named interface. The reference must have a name and be in the canonical
// form, otherwise an error is returned.
// If an error was encountered it is returned, along with a nil Reference.
// NOTE: ParseNamed will not handle short digests.
func ParseNamed(s string) (Named, error) {
named, err := ParseNormalizedNamed(s)
if err != nil {
return nil, err
}
if named.String() != s {
return nil, ErrNameNotCanonical
}
return named, nil
}
// WithName returns a named object representing the given string. If the input
// is invalid ErrReferenceInvalidFormat will be returned.
func WithName(name string) (Named, error) {
if len(name) > NameTotalLengthMax {
return nil, ErrNameTooLong
}
match := anchoredNameRegexp.FindStringSubmatch(name)
if match == nil || len(match) != 3 {
return nil, ErrReferenceInvalidFormat
}
return repository{
domain: match[1],
path: match[2],
}, nil
}
// WithTag combines the name from "name" and the tag from "tag" to form a
// reference incorporating both the name and the tag.
func WithTag(name Named, tag string) (NamedTagged, error) {
if !anchoredTagRegexp.MatchString(tag) {
return nil, ErrTagInvalidFormat
}
var repo repository
if r, ok := name.(namedRepository); ok {
repo.domain = r.Domain()
repo.path = r.Path()
} else {
repo.path = name.Name()
}
if canonical, ok := name.(Canonical); ok {
return reference{
namedRepository: repo,
tag: tag,
digest: canonical.Digest(),
}, nil
}
return taggedReference{
namedRepository: repo,
tag: tag,
}, nil
}
// WithDigest combines the name from "name" and the digest from "digest" to form
// a reference incorporating both the name and the digest.
func WithDigest(name Named, digest digest.Digest) (Canonical, error) {
if !anchoredDigestRegexp.MatchString(digest.String()) {
return nil, ErrDigestInvalidFormat
}
var repo repository
if r, ok := name.(namedRepository); ok {
repo.domain = r.Domain()
repo.path = r.Path()
} else {
repo.path = name.Name()
}
if tagged, ok := name.(Tagged); ok {
return reference{
namedRepository: repo,
tag: tagged.Tag(),
digest: digest,
}, nil
}
return canonicalReference{
namedRepository: repo,
digest: digest,
}, nil
}
// TrimNamed removes any tag or digest from the named reference.
func TrimNamed(ref Named) Named {
domain, path := SplitHostname(ref)
return repository{
domain: domain,
path: path,
}
}
func getBestReferenceType(ref reference) Reference {
if ref.Name() == "" {
// Allow digest only references
if ref.digest != "" {
return digestReference(ref.digest)
}
return nil
}
if ref.tag == "" {
if ref.digest != "" {
return canonicalReference{
namedRepository: ref.namedRepository,
digest: ref.digest,
}
}
return ref.namedRepository
}
if ref.digest == "" {
return taggedReference{
namedRepository: ref.namedRepository,
tag: ref.tag,
}
}
return ref
}
type reference struct {
namedRepository
tag string
digest digest.Digest
}
func (r reference) String() string {
return r.Name() + ":" + r.tag + "@" + r.digest.String()
}
func (r reference) Tag() string {
return r.tag
}
func (r reference) Digest() digest.Digest {
return r.digest
}
type repository struct {
domain string
path string
}
func (r repository) String() string {
return r.Name()
}
func (r repository) Name() string {
if r.domain == "" {
return r.path
}
return r.domain + "/" + r.path
}
func (r repository) Domain() string {
return r.domain
}
func (r repository) Path() string {
return r.path
}
type digestReference digest.Digest
func (d digestReference) String() string {
return digest.Digest(d).String()
}
func (d digestReference) Digest() digest.Digest {
return digest.Digest(d)
}
type taggedReference struct {
namedRepository
tag string
}
func (t taggedReference) String() string {
return t.Name() + ":" + t.tag
}
func (t taggedReference) Tag() string {
return t.tag
}
type canonicalReference struct {
namedRepository
digest digest.Digest
}
func (c canonicalReference) String() string {
return c.Name() + "@" + c.digest.String()
}
func (c canonicalReference) Digest() digest.Digest {
return c.digest
}


@ -1,143 +0,0 @@
package reference
import "regexp"
var (
// alphaNumericRegexp defines the alpha numeric atom, typically a
// component of names. This only allows lower case characters and digits.
alphaNumericRegexp = match(`[a-z0-9]+`)
// separatorRegexp defines the separators allowed to be embedded in name
// components. This allow one period, one or two underscore and multiple
// dashes.
separatorRegexp = match(`(?:[._]|__|[-]*)`)
// nameComponentRegexp restricts registry path component names to start
// with at least one letter or number, with following parts able to be
// separated by one period, one or two underscore and multiple dashes.
nameComponentRegexp = expression(
alphaNumericRegexp,
optional(repeated(separatorRegexp, alphaNumericRegexp)))
// domainComponentRegexp restricts the registry domain component of a
// repository name to start with a component as defined by DomainRegexp
// and followed by an optional port.
domainComponentRegexp = match(`(?:[a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9-]*[a-zA-Z0-9])`)
// DomainRegexp defines the structure of potential domain components
// that may be part of image names. This is purposely a subset of what is
// allowed by DNS to ensure backwards compatibility with Docker image
// names.
DomainRegexp = expression(
domainComponentRegexp,
optional(repeated(literal(`.`), domainComponentRegexp)),
optional(literal(`:`), match(`[0-9]+`)))
// TagRegexp matches valid tag names. From docker/docker:graph/tags.go.
TagRegexp = match(`[\w][\w.-]{0,127}`)
// anchoredTagRegexp matches valid tag names, anchored at the start and
// end of the matched string.
anchoredTagRegexp = anchored(TagRegexp)
// DigestRegexp matches valid digests.
DigestRegexp = match(`[A-Za-z][A-Za-z0-9]*(?:[-_+.][A-Za-z][A-Za-z0-9]*)*[:][[:xdigit:]]{32,}`)
// anchoredDigestRegexp matches valid digests, anchored at the start and
// end of the matched string.
anchoredDigestRegexp = anchored(DigestRegexp)
// NameRegexp is the format for the name component of references. The
// regexp has capturing groups for the domain and name part omitting
// the separating forward slash from either.
NameRegexp = expression(
optional(DomainRegexp, literal(`/`)),
nameComponentRegexp,
optional(repeated(literal(`/`), nameComponentRegexp)))
// anchoredNameRegexp is used to parse a name value, capturing the
// domain and trailing components.
anchoredNameRegexp = anchored(
optional(capture(DomainRegexp), literal(`/`)),
capture(nameComponentRegexp,
optional(repeated(literal(`/`), nameComponentRegexp))))
// ReferenceRegexp is the full supported format of a reference. The regexp
// is anchored and has capturing groups for name, tag, and digest
// components.
ReferenceRegexp = anchored(capture(NameRegexp),
optional(literal(":"), capture(TagRegexp)),
optional(literal("@"), capture(DigestRegexp)))
// IdentifierRegexp is the format for string identifier used as a
// content addressable identifier using sha256. These identifiers
// are like digests without the algorithm, since sha256 is used.
IdentifierRegexp = match(`([a-f0-9]{64})`)
// ShortIdentifierRegexp is the format used to represent a prefix
// of an identifier. A prefix may be used to match a sha256 identifier
// within a list of trusted identifiers.
ShortIdentifierRegexp = match(`([a-f0-9]{6,64})`)
// anchoredIdentifierRegexp is used to check or match an
// identifier value, anchored at start and end of string.
anchoredIdentifierRegexp = anchored(IdentifierRegexp)
// anchoredShortIdentifierRegexp is used to check if a value
// is a possible identifier prefix, anchored at start and end
// of string.
anchoredShortIdentifierRegexp = anchored(ShortIdentifierRegexp)
)
// match compiles the string to a regular expression.
var match = regexp.MustCompile
// literal compiles s into a literal regular expression, escaping any regexp
// reserved characters.
func literal(s string) *regexp.Regexp {
re := match(regexp.QuoteMeta(s))
if _, complete := re.LiteralPrefix(); !complete {
panic("must be a literal")
}
return re
}
// expression defines a full expression, where each regular expression must
// follow the previous.
func expression(res ...*regexp.Regexp) *regexp.Regexp {
var s string
for _, re := range res {
s += re.String()
}
return match(s)
}
// optional wraps the expression in a non-capturing group and makes the
// production optional.
func optional(res ...*regexp.Regexp) *regexp.Regexp {
return match(group(expression(res...)).String() + `?`)
}
// repeated wraps the regexp in a non-capturing group to get one or more
// matches.
func repeated(res ...*regexp.Regexp) *regexp.Regexp {
return match(group(expression(res...)).String() + `+`)
}
// group wraps the regexp in a non-capturing group.
func group(res ...*regexp.Regexp) *regexp.Regexp {
return match(`(?:` + expression(res...).String() + `)`)
}
// capture wraps the expression in a capturing group.
func capture(res ...*regexp.Regexp) *regexp.Regexp {
return match(`(` + expression(res...).String() + `)`)
}
// anchored anchors the regular expression by adding start and end delimiters.
func anchored(res ...*regexp.Regexp) *regexp.Regexp {
return match(`^` + expression(res...).String() + `$`)
}

vendor/github.com/fsnotify/fsnotify/LICENSE generated vendored Normal file

@ -0,0 +1,28 @@
Copyright (c) 2012 The Go Authors. All rights reserved.
Copyright (c) 2012-2019 fsnotify Authors. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following disclaimer
in the documentation and/or other materials provided with the
distribution.
* Neither the name of Google Inc. nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

vendor/github.com/fsnotify/fsnotify/README.md generated vendored Normal file

@ -0,0 +1,83 @@
# File system notifications for Go
[![GoDoc](https://godoc.org/github.com/fsnotify/fsnotify?status.svg)](https://godoc.org/github.com/fsnotify/fsnotify) [![Go Report Card](https://goreportcard.com/badge/github.com/fsnotify/fsnotify)](https://goreportcard.com/report/github.com/fsnotify/fsnotify)
fsnotify utilizes [golang.org/x/sys](https://godoc.org/golang.org/x/sys) rather than `syscall` from the standard library. Ensure you have the latest version installed by running:
```console
go get -u golang.org/x/sys/...
```
Cross platform: Windows, Linux, BSD and macOS.
| Adapter | OS | Status |
| --------------------- | -------------------------------- | ------------------------------------------------------------------------------------------------------------------------------- |
| inotify | Linux 2.6.27 or later, Android\* | Supported [![Build Status](https://travis-ci.org/fsnotify/fsnotify.svg?branch=master)](https://travis-ci.org/fsnotify/fsnotify) |
| kqueue | BSD, macOS, iOS\* | Supported [![Build Status](https://travis-ci.org/fsnotify/fsnotify.svg?branch=master)](https://travis-ci.org/fsnotify/fsnotify) |
| ReadDirectoryChangesW | Windows | Supported [![Build Status](https://travis-ci.org/fsnotify/fsnotify.svg?branch=master)](https://travis-ci.org/fsnotify/fsnotify) |
| FSEvents | macOS | [Planned](https://github.com/fsnotify/fsnotify/issues/11) |
| FEN | Solaris 11 | [In Progress](https://github.com/fsnotify/fsnotify/issues/12) |
| fanotify | Linux 2.6.37+ | [Planned](https://github.com/fsnotify/fsnotify/issues/114) |
| USN Journals | Windows | [Maybe](https://github.com/fsnotify/fsnotify/issues/53) |
| Polling | *All* | [Maybe](https://github.com/fsnotify/fsnotify/issues/9) |
\* Android and iOS are untested.
Please see [the documentation](https://godoc.org/github.com/fsnotify/fsnotify) and consult the [FAQ](#faq) for usage information.
## API stability
fsnotify is a fork of [howeyc/fsnotify](https://godoc.org/github.com/howeyc/fsnotify) with a new API as of v1.0. The API is based on [this design document](http://goo.gl/MrYxyA).
All [releases](https://github.com/fsnotify/fsnotify/releases) are tagged based on [Semantic Versioning](http://semver.org/). Further API changes are [planned](https://github.com/fsnotify/fsnotify/milestones), and will be tagged with a new major revision number.
Go 1.6 supports dependencies located in the `vendor/` folder. Unless you are creating a library, it is recommended that you copy fsnotify into `vendor/github.com/fsnotify/fsnotify` within your project, and likewise for `golang.org/x/sys`.
## Contributing
Please refer to [CONTRIBUTING][] before opening an issue or pull request.
## Example
See [example_test.go](https://github.com/fsnotify/fsnotify/blob/master/example_test.go).
## FAQ
**When a file is moved to another directory is it still being watched?**
No (it shouldn't be, unless you are watching where it was moved to).
**When I watch a directory, are all subdirectories watched as well?**
No, you must add watches for any directory you want to watch (a recursive watcher is on the roadmap [#18][]).
**Do I have to watch the Error and Event channels in a separate goroutine?**
As of now, yes. Looking into making this single-thread friendly (see [howeyc #7][#7])
**Why am I receiving multiple events for the same file on OS X?**
Spotlight indexing on OS X can result in multiple events (see [howeyc #62][#62]). A temporary workaround is to add your folder(s) to the *Spotlight Privacy settings* until we have a native FSEvents implementation (see [#11][]).
**How many files can be watched at once?**
There are OS-specific limits as to how many watches can be created:
* Linux: /proc/sys/fs/inotify/max_user_watches contains the limit, reaching this limit results in a "no space left on device" error.
* BSD / OSX: sysctl variables "kern.maxfiles" and "kern.maxfilesperproc", reaching these limits results in a "too many open files" error.
**Why don't notifications work with NFS filesystems or filesystem in userspace (FUSE)?**
fsnotify requires support from underlying OS to work. The current NFS protocol does not provide network level support for file notifications.
[#62]: https://github.com/howeyc/fsnotify/issues/62
[#18]: https://github.com/fsnotify/fsnotify/issues/18
[#11]: https://github.com/fsnotify/fsnotify/issues/11
[#7]: https://github.com/howeyc/fsnotify/issues/7
[contributing]: https://github.com/fsnotify/fsnotify/blob/master/CONTRIBUTING.md
## Related Projects
* [notify](https://github.com/rjeczalik/notify)
* [fsevents](https://github.com/fsnotify/fsevents)
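The README points at example_test.go for a complete example; as a minimal, hedged sketch, the usual watch loop looks roughly like this, using only the API visible in the vendored sources added by this commit (`./some-dir` is a placeholder path that must exist):

```go
package main

import (
	"log"

	"github.com/fsnotify/fsnotify"
)

func main() {
	watcher, err := fsnotify.NewWatcher()
	if err != nil {
		log.Fatal(err)
	}
	defer watcher.Close()

	// Watch a single directory (non-recursively).
	if err := watcher.Add("./some-dir"); err != nil {
		log.Fatal(err)
	}

	for {
		select {
		case event, ok := <-watcher.Events:
			if !ok {
				return
			}
			// Op is a bit mask; filter for the operations you care about.
			if event.Op&(fsnotify.Write|fsnotify.Create|fsnotify.Remove|fsnotify.Rename) != 0 {
				log.Printf("change detected: %s", event)
			}
		case err, ok := <-watcher.Errors:
			if !ok {
				return
			}
			log.Printf("watch error: %v", err)
		}
	}
}
```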

vendor/github.com/fsnotify/fsnotify/fen.go generated vendored Normal file

@ -0,0 +1,37 @@
// Copyright 2010 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build solaris
package fsnotify
import (
"errors"
)
// Watcher watches a set of files, delivering events to a channel.
type Watcher struct {
Events chan Event
Errors chan error
}
// NewWatcher establishes a new watcher with the underlying OS and begins waiting for events.
func NewWatcher() (*Watcher, error) {
return nil, errors.New("FEN based watcher not yet supported for fsnotify\n")
}
// Close removes all watches and closes the events channel.
func (w *Watcher) Close() error {
return nil
}
// Add starts watching the named file or directory (non-recursively).
func (w *Watcher) Add(name string) error {
return nil
}
// Remove stops watching the named file or directory (non-recursively).
func (w *Watcher) Remove(name string) error {
return nil
}

vendor/github.com/fsnotify/fsnotify/fsnotify.go generated vendored Normal file

@ -0,0 +1,68 @@
// Copyright 2012 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build !plan9
// Package fsnotify provides a platform-independent interface for file system notifications.
package fsnotify
import (
"bytes"
"errors"
"fmt"
)
// Event represents a single file system notification.
type Event struct {
Name string // Relative path to the file or directory.
Op Op // File operation that triggered the event.
}
// Op describes a set of file operations.
type Op uint32
// These are the generalized file operations that can trigger a notification.
const (
Create Op = 1 << iota
Write
Remove
Rename
Chmod
)
func (op Op) String() string {
// Use a buffer for efficient string concatenation
var buffer bytes.Buffer
if op&Create == Create {
buffer.WriteString("|CREATE")
}
if op&Remove == Remove {
buffer.WriteString("|REMOVE")
}
if op&Write == Write {
buffer.WriteString("|WRITE")
}
if op&Rename == Rename {
buffer.WriteString("|RENAME")
}
if op&Chmod == Chmod {
buffer.WriteString("|CHMOD")
}
if buffer.Len() == 0 {
return ""
}
return buffer.String()[1:] // Strip leading pipe
}
// String returns a string representation of the event in the form
// "file: REMOVE|WRITE|..."
func (e Event) String() string {
return fmt.Sprintf("%q: %s", e.Name, e.Op.String())
}
// Common errors that can be reported by a watcher
var (
ErrEventOverflow = errors.New("fsnotify queue overflow")
)

vendor/github.com/fsnotify/fsnotify/go.mod generated vendored Normal file

@ -0,0 +1,5 @@
module github.com/fsnotify/fsnotify
go 1.13
require golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9

vendor/github.com/fsnotify/fsnotify/inotify.go generated vendored Normal file

@ -0,0 +1,337 @@
// Copyright 2010 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build linux
package fsnotify
import (
"errors"
"fmt"
"io"
"os"
"path/filepath"
"strings"
"sync"
"unsafe"
"golang.org/x/sys/unix"
)
// Watcher watches a set of files, delivering events to a channel.
type Watcher struct {
Events chan Event
Errors chan error
mu sync.Mutex // Map access
fd int
poller *fdPoller
watches map[string]*watch // Map of inotify watches (key: path)
paths map[int]string // Map of watched paths (key: watch descriptor)
done chan struct{} // Channel for sending a "quit message" to the reader goroutine
doneResp chan struct{} // Channel to respond to Close
}
// NewWatcher establishes a new watcher with the underlying OS and begins waiting for events.
func NewWatcher() (*Watcher, error) {
// Create inotify fd
fd, errno := unix.InotifyInit1(unix.IN_CLOEXEC)
if fd == -1 {
return nil, errno
}
// Create epoll
poller, err := newFdPoller(fd)
if err != nil {
unix.Close(fd)
return nil, err
}
w := &Watcher{
fd: fd,
poller: poller,
watches: make(map[string]*watch),
paths: make(map[int]string),
Events: make(chan Event),
Errors: make(chan error),
done: make(chan struct{}),
doneResp: make(chan struct{}),
}
go w.readEvents()
return w, nil
}
func (w *Watcher) isClosed() bool {
select {
case <-w.done:
return true
default:
return false
}
}
// Close removes all watches and closes the events channel.
func (w *Watcher) Close() error {
if w.isClosed() {
return nil
}
// Send 'close' signal to goroutine, and set the Watcher to closed.
close(w.done)
// Wake up goroutine
w.poller.wake()
// Wait for goroutine to close
<-w.doneResp
return nil
}
// Add starts watching the named file or directory (non-recursively).
func (w *Watcher) Add(name string) error {
name = filepath.Clean(name)
if w.isClosed() {
return errors.New("inotify instance already closed")
}
const agnosticEvents = unix.IN_MOVED_TO | unix.IN_MOVED_FROM |
unix.IN_CREATE | unix.IN_ATTRIB | unix.IN_MODIFY |
unix.IN_MOVE_SELF | unix.IN_DELETE | unix.IN_DELETE_SELF
var flags uint32 = agnosticEvents
w.mu.Lock()
defer w.mu.Unlock()
watchEntry := w.watches[name]
if watchEntry != nil {
flags |= watchEntry.flags | unix.IN_MASK_ADD
}
wd, errno := unix.InotifyAddWatch(w.fd, name, flags)
if wd == -1 {
return errno
}
if watchEntry == nil {
w.watches[name] = &watch{wd: uint32(wd), flags: flags}
w.paths[wd] = name
} else {
watchEntry.wd = uint32(wd)
watchEntry.flags = flags
}
return nil
}
// Remove stops watching the named file or directory (non-recursively).
func (w *Watcher) Remove(name string) error {
name = filepath.Clean(name)
// Fetch the watch.
w.mu.Lock()
defer w.mu.Unlock()
watch, ok := w.watches[name]
// Remove it from inotify.
if !ok {
return fmt.Errorf("can't remove non-existent inotify watch for: %s", name)
}
// We successfully removed the watch if InotifyRmWatch doesn't return an
// error, we need to clean up our internal state to ensure it matches
// inotify's kernel state.
delete(w.paths, int(watch.wd))
delete(w.watches, name)
// inotify_rm_watch will return EINVAL if the file has been deleted;
// the inotify will already have been removed.
// watches and paths are deleted in ignoreLinux() implicitly and asynchronously
// by calling inotify_rm_watch() below. e.g. readEvents() goroutine receives IN_IGNORE
// so that EINVAL means that the wd is being rm_watch()ed or its file removed
// by another thread and we have not received IN_IGNORE event.
success, errno := unix.InotifyRmWatch(w.fd, watch.wd)
if success == -1 {
// TODO: Perhaps it's not helpful to return an error here in every case.
// the only two possible errors are:
// EBADF, which happens when w.fd is not a valid file descriptor of any kind.
// EINVAL, which is when fd is not an inotify descriptor or wd is not a valid watch descriptor.
// Watch descriptors are invalidated when they are removed explicitly or implicitly;
// explicitly by inotify_rm_watch, implicitly when the file they are watching is deleted.
return errno
}
return nil
}
type watch struct {
wd uint32 // Watch descriptor (as returned by the inotify_add_watch() syscall)
flags uint32 // inotify flags of this watch (see inotify(7) for the list of valid flags)
}
// readEvents reads from the inotify file descriptor, converts the
// received events into Event objects and sends them via the Events channel
func (w *Watcher) readEvents() {
var (
buf [unix.SizeofInotifyEvent * 4096]byte // Buffer for a maximum of 4096 raw events
n int // Number of bytes read with read()
errno error // Syscall errno
ok bool // For poller.wait
)
defer close(w.doneResp)
defer close(w.Errors)
defer close(w.Events)
defer unix.Close(w.fd)
defer w.poller.close()
for {
// See if we have been closed.
if w.isClosed() {
return
}
ok, errno = w.poller.wait()
if errno != nil {
select {
case w.Errors <- errno:
case <-w.done:
return
}
continue
}
if !ok {
continue
}
n, errno = unix.Read(w.fd, buf[:])
// If a signal interrupted execution, see if we've been asked to close, and try again.
// http://man7.org/linux/man-pages/man7/signal.7.html :
// "Before Linux 3.8, reads from an inotify(7) file descriptor were not restartable"
if errno == unix.EINTR {
continue
}
// unix.Read might have been woken up by Close. If so, we're done.
if w.isClosed() {
return
}
if n < unix.SizeofInotifyEvent {
var err error
if n == 0 {
// EOF was received; this should really never happen.
err = io.EOF
} else if n < 0 {
// If an error occurred while reading.
err = errno
} else {
// Read was too short.
err = errors.New("notify: short read in readEvents()")
}
select {
case w.Errors <- err:
case <-w.done:
return
}
continue
}
var offset uint32
// We don't know how many events we just read into the buffer
// While the offset points to at least one whole event...
for offset <= uint32(n-unix.SizeofInotifyEvent) {
// Point "raw" to the event in the buffer
raw := (*unix.InotifyEvent)(unsafe.Pointer(&buf[offset]))
mask := uint32(raw.Mask)
nameLen := uint32(raw.Len)
if mask&unix.IN_Q_OVERFLOW != 0 {
select {
case w.Errors <- ErrEventOverflow:
case <-w.done:
return
}
}
// If the event happened to the watched directory or the watched file, the kernel
// doesn't append the filename to the event, but we would like to always fill
// the "Name" field with a valid filename. We retrieve the path of the watch from
// the "paths" map.
w.mu.Lock()
name, ok := w.paths[int(raw.Wd)]
// IN_DELETE_SELF occurs when the file/directory being watched is removed.
// This is a sign to clean up the maps, otherwise we are no longer in sync
// with the inotify kernel state which has already deleted the watch
// automatically.
if ok && mask&unix.IN_DELETE_SELF == unix.IN_DELETE_SELF {
delete(w.paths, int(raw.Wd))
delete(w.watches, name)
}
w.mu.Unlock()
if nameLen > 0 {
// Point "bytes" at the first byte of the filename
bytes := (*[unix.PathMax]byte)(unsafe.Pointer(&buf[offset+unix.SizeofInotifyEvent]))
// The filename is padded with NULL bytes. TrimRight() gets rid of those.
name += "/" + strings.TrimRight(string(bytes[0:nameLen]), "\000")
}
event := newEvent(name, mask)
// Send the events that are not ignored on the events channel
if !event.ignoreLinux(mask) {
select {
case w.Events <- event:
case <-w.done:
return
}
}
// Move to the next event in the buffer
offset += unix.SizeofInotifyEvent + nameLen
}
}
}
// Certain types of events can be "ignored" and not sent over the Events
// channel. Such as events marked ignore by the kernel, or MODIFY events
// against files that do not exist.
func (e *Event) ignoreLinux(mask uint32) bool {
// Ignore anything the inotify API says to ignore
if mask&unix.IN_IGNORED == unix.IN_IGNORED {
return true
}
// If the event is not a DELETE or RENAME, the file must exist.
// Otherwise the event is ignored.
// *Note*: this was put in place because it was seen that a MODIFY
// event was sent after the DELETE. This ignores that MODIFY and
// assumes a DELETE will come or has come if the file doesn't exist.
if !(e.Op&Remove == Remove || e.Op&Rename == Rename) {
_, statErr := os.Lstat(e.Name)
return os.IsNotExist(statErr)
}
return false
}
// newEvent returns a platform-independent Event based on an inotify mask.
func newEvent(name string, mask uint32) Event {
e := Event{Name: name}
if mask&unix.IN_CREATE == unix.IN_CREATE || mask&unix.IN_MOVED_TO == unix.IN_MOVED_TO {
e.Op |= Create
}
if mask&unix.IN_DELETE_SELF == unix.IN_DELETE_SELF || mask&unix.IN_DELETE == unix.IN_DELETE {
e.Op |= Remove
}
if mask&unix.IN_MODIFY == unix.IN_MODIFY {
e.Op |= Write
}
if mask&unix.IN_MOVE_SELF == unix.IN_MOVE_SELF || mask&unix.IN_MOVED_FROM == unix.IN_MOVED_FROM {
e.Op |= Rename
}
if mask&unix.IN_ATTRIB == unix.IN_ATTRIB {
e.Op |= Chmod
}
return e
}

vendor/github.com/fsnotify/fsnotify/inotify_poller.go generated vendored Normal file

@@ -0,0 +1,187 @@
// Copyright 2015 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build linux
package fsnotify
import (
"errors"
"golang.org/x/sys/unix"
)
type fdPoller struct {
fd int // File descriptor (as returned by the inotify_init() syscall)
epfd int // Epoll file descriptor
pipe [2]int // Pipe for waking up
}
func emptyPoller(fd int) *fdPoller {
poller := new(fdPoller)
poller.fd = fd
poller.epfd = -1
poller.pipe[0] = -1
poller.pipe[1] = -1
return poller
}
// Create a new inotify poller.
// This creates an inotify handler, and an epoll handler.
func newFdPoller(fd int) (*fdPoller, error) {
var errno error
poller := emptyPoller(fd)
defer func() {
if errno != nil {
poller.close()
}
}()
poller.fd = fd
// Create epoll fd
poller.epfd, errno = unix.EpollCreate1(unix.EPOLL_CLOEXEC)
if poller.epfd == -1 {
return nil, errno
}
// Create pipe; pipe[0] is the read end, pipe[1] the write end.
errno = unix.Pipe2(poller.pipe[:], unix.O_NONBLOCK|unix.O_CLOEXEC)
if errno != nil {
return nil, errno
}
// Register inotify fd with epoll
event := unix.EpollEvent{
Fd: int32(poller.fd),
Events: unix.EPOLLIN,
}
errno = unix.EpollCtl(poller.epfd, unix.EPOLL_CTL_ADD, poller.fd, &event)
if errno != nil {
return nil, errno
}
// Register pipe fd with epoll
event = unix.EpollEvent{
Fd: int32(poller.pipe[0]),
Events: unix.EPOLLIN,
}
errno = unix.EpollCtl(poller.epfd, unix.EPOLL_CTL_ADD, poller.pipe[0], &event)
if errno != nil {
return nil, errno
}
return poller, nil
}
// Wait using epoll.
// Returns true if something is ready to be read,
// false if there is not.
func (poller *fdPoller) wait() (bool, error) {
// 3 possible events per fd, and 2 fds, makes a maximum of 6 events.
// I don't know whether epoll_wait returns the number of events returned,
// or the total number of events ready.
// I decided to catch both by making the buffer one larger than the maximum.
events := make([]unix.EpollEvent, 7)
for {
n, errno := unix.EpollWait(poller.epfd, events, -1)
if n == -1 {
if errno == unix.EINTR {
continue
}
return false, errno
}
if n == 0 {
// If there are no events, try again.
continue
}
if n > 6 {
// This should never happen. More events were returned than should be possible.
return false, errors.New("epoll_wait returned more events than I know what to do with")
}
ready := events[:n]
epollhup := false
epollerr := false
epollin := false
for _, event := range ready {
if event.Fd == int32(poller.fd) {
if event.Events&unix.EPOLLHUP != 0 {
// This should not happen, but if it does, treat it as a wakeup.
epollhup = true
}
if event.Events&unix.EPOLLERR != 0 {
// If an error is waiting on the file descriptor, we should pretend
// something is ready to read, and let unix.Read pick up the error.
epollerr = true
}
if event.Events&unix.EPOLLIN != 0 {
// There is data to read.
epollin = true
}
}
if event.Fd == int32(poller.pipe[0]) {
if event.Events&unix.EPOLLHUP != 0 {
// Write pipe descriptor was closed, by us. This means we're closing down the
// watcher, and we should wake up.
}
if event.Events&unix.EPOLLERR != 0 {
// If an error is waiting on the pipe file descriptor.
// This is an absolute mystery, and should never ever happen.
return false, errors.New("Error on the pipe descriptor.")
}
if event.Events&unix.EPOLLIN != 0 {
// This is a regular wakeup, so we have to clear the buffer.
err := poller.clearWake()
if err != nil {
return false, err
}
}
}
}
if epollhup || epollerr || epollin {
return true, nil
}
return false, nil
}
}
// Close the write end of the poller.
func (poller *fdPoller) wake() error {
buf := make([]byte, 1)
n, errno := unix.Write(poller.pipe[1], buf)
if n == -1 {
if errno == unix.EAGAIN {
// Buffer is full, poller will wake.
return nil
}
return errno
}
return nil
}
func (poller *fdPoller) clearWake() error {
// You have to be woken up a LOT in order to get to 100!
buf := make([]byte, 100)
n, errno := unix.Read(poller.pipe[0], buf)
if n == -1 {
if errno == unix.EAGAIN {
// Buffer is empty, someone else cleared our wake.
return nil
}
return errno
}
return nil
}
// Close all poller file descriptors, but not the one passed to it.
func (poller *fdPoller) close() {
if poller.pipe[1] != -1 {
unix.Close(poller.pipe[1])
}
if poller.pipe[0] != -1 {
unix.Close(poller.pipe[0])
}
if poller.epfd != -1 {
unix.Close(poller.epfd)
}
}

vendor/github.com/fsnotify/fsnotify/kqueue.go generated vendored Normal file

@@ -0,0 +1,521 @@
// Copyright 2010 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build freebsd openbsd netbsd dragonfly darwin
package fsnotify
import (
"errors"
"fmt"
"io/ioutil"
"os"
"path/filepath"
"sync"
"time"
"golang.org/x/sys/unix"
)
// Watcher watches a set of files, delivering events to a channel.
type Watcher struct {
Events chan Event
Errors chan error
done chan struct{} // Channel for sending a "quit message" to the reader goroutine
kq int // File descriptor (as returned by the kqueue() syscall).
mu sync.Mutex // Protects access to watcher data
watches map[string]int // Map of watched file descriptors (key: path).
externalWatches map[string]bool // Map of watches added by user of the library.
dirFlags map[string]uint32 // Map of watched directories to fflags used in kqueue.
paths map[int]pathInfo // Map file descriptors to path names for processing kqueue events.
fileExists map[string]bool // Keep track of if we know this file exists (to stop duplicate create events).
isClosed bool // Set to true when Close() is first called
}
type pathInfo struct {
name string
isDir bool
}
// NewWatcher establishes a new watcher with the underlying OS and begins waiting for events.
func NewWatcher() (*Watcher, error) {
kq, err := kqueue()
if err != nil {
return nil, err
}
w := &Watcher{
kq: kq,
watches: make(map[string]int),
dirFlags: make(map[string]uint32),
paths: make(map[int]pathInfo),
fileExists: make(map[string]bool),
externalWatches: make(map[string]bool),
Events: make(chan Event),
Errors: make(chan error),
done: make(chan struct{}),
}
go w.readEvents()
return w, nil
}
// Close removes all watches and closes the events channel.
func (w *Watcher) Close() error {
w.mu.Lock()
if w.isClosed {
w.mu.Unlock()
return nil
}
w.isClosed = true
// copy paths to remove while locked
var pathsToRemove = make([]string, 0, len(w.watches))
for name := range w.watches {
pathsToRemove = append(pathsToRemove, name)
}
w.mu.Unlock()
// unlock before calling Remove, which also locks
for _, name := range pathsToRemove {
w.Remove(name)
}
// send a "quit" message to the reader goroutine
close(w.done)
return nil
}
// Add starts watching the named file or directory (non-recursively).
func (w *Watcher) Add(name string) error {
w.mu.Lock()
w.externalWatches[name] = true
w.mu.Unlock()
_, err := w.addWatch(name, noteAllEvents)
return err
}
// Remove stops watching the named file or directory (non-recursively).
func (w *Watcher) Remove(name string) error {
name = filepath.Clean(name)
w.mu.Lock()
watchfd, ok := w.watches[name]
w.mu.Unlock()
if !ok {
return fmt.Errorf("can't remove non-existent kevent watch for: %s", name)
}
const registerRemove = unix.EV_DELETE
if err := register(w.kq, []int{watchfd}, registerRemove, 0); err != nil {
return err
}
unix.Close(watchfd)
w.mu.Lock()
isDir := w.paths[watchfd].isDir
delete(w.watches, name)
delete(w.paths, watchfd)
delete(w.dirFlags, name)
w.mu.Unlock()
// Find all watched paths that are in this directory that are not external.
if isDir {
var pathsToRemove []string
w.mu.Lock()
for _, path := range w.paths {
wdir, _ := filepath.Split(path.name)
if filepath.Clean(wdir) == name {
if !w.externalWatches[path.name] {
pathsToRemove = append(pathsToRemove, path.name)
}
}
}
w.mu.Unlock()
for _, name := range pathsToRemove {
// Since these are internal, not much sense in propagating error
// to the user, as that will just confuse them with an error about
// a path they did not explicitly watch themselves.
w.Remove(name)
}
}
return nil
}
// Watch all events (except NOTE_EXTEND, NOTE_LINK, NOTE_REVOKE)
const noteAllEvents = unix.NOTE_DELETE | unix.NOTE_WRITE | unix.NOTE_ATTRIB | unix.NOTE_RENAME
// keventWaitTime to block on each read from kevent
var keventWaitTime = durationToTimespec(100 * time.Millisecond)
// addWatch adds name to the watched file set.
// The flags are interpreted as described in kevent(2).
// Returns the real path to the file which was added, if any, which may be different from the one passed in the case of symlinks.
func (w *Watcher) addWatch(name string, flags uint32) (string, error) {
var isDir bool
// Make ./name and name equivalent
name = filepath.Clean(name)
w.mu.Lock()
if w.isClosed {
w.mu.Unlock()
return "", errors.New("kevent instance already closed")
}
watchfd, alreadyWatching := w.watches[name]
// We already have a watch, but we can still override flags.
if alreadyWatching {
isDir = w.paths[watchfd].isDir
}
w.mu.Unlock()
if !alreadyWatching {
fi, err := os.Lstat(name)
if err != nil {
return "", err
}
// Don't watch sockets.
if fi.Mode()&os.ModeSocket == os.ModeSocket {
return "", nil
}
// Don't watch named pipes.
if fi.Mode()&os.ModeNamedPipe == os.ModeNamedPipe {
return "", nil
}
// Follow Symlinks
// Unfortunately, Linux can add bogus symlinks to watch list without
// issue, and Windows can't do symlinks period (AFAIK). To maintain
// consistency, we will act like everything is fine. There will simply
// be no file events for broken symlinks.
// Hence the returns of nil on errors.
if fi.Mode()&os.ModeSymlink == os.ModeSymlink {
name, err = filepath.EvalSymlinks(name)
if err != nil {
return "", nil
}
w.mu.Lock()
_, alreadyWatching = w.watches[name]
w.mu.Unlock()
if alreadyWatching {
return name, nil
}
fi, err = os.Lstat(name)
if err != nil {
return "", nil
}
}
watchfd, err = unix.Open(name, openMode, 0700)
if watchfd == -1 {
return "", err
}
isDir = fi.IsDir()
}
const registerAdd = unix.EV_ADD | unix.EV_CLEAR | unix.EV_ENABLE
if err := register(w.kq, []int{watchfd}, registerAdd, flags); err != nil {
unix.Close(watchfd)
return "", err
}
if !alreadyWatching {
w.mu.Lock()
w.watches[name] = watchfd
w.paths[watchfd] = pathInfo{name: name, isDir: isDir}
w.mu.Unlock()
}
if isDir {
// Watch the directory if it has not been watched before,
// or if it was watched before, but perhaps only a NOTE_DELETE (watchDirectoryFiles)
w.mu.Lock()
watchDir := (flags&unix.NOTE_WRITE) == unix.NOTE_WRITE &&
(!alreadyWatching || (w.dirFlags[name]&unix.NOTE_WRITE) != unix.NOTE_WRITE)
// Store flags so this watch can be updated later
w.dirFlags[name] = flags
w.mu.Unlock()
if watchDir {
if err := w.watchDirectoryFiles(name); err != nil {
return "", err
}
}
}
return name, nil
}
// readEvents reads from kqueue and converts the received kevents into
// Event values that it sends down the Events channel.
func (w *Watcher) readEvents() {
eventBuffer := make([]unix.Kevent_t, 10)
loop:
for {
// See if there is a message on the "done" channel
select {
case <-w.done:
break loop
default:
}
// Get new events
kevents, err := read(w.kq, eventBuffer, &keventWaitTime)
// EINTR is okay, the syscall was interrupted before timeout expired.
if err != nil && err != unix.EINTR {
select {
case w.Errors <- err:
case <-w.done:
break loop
}
continue
}
// Flush the events we received to the Events channel
for len(kevents) > 0 {
kevent := &kevents[0]
watchfd := int(kevent.Ident)
mask := uint32(kevent.Fflags)
w.mu.Lock()
path := w.paths[watchfd]
w.mu.Unlock()
event := newEvent(path.name, mask)
if path.isDir && !(event.Op&Remove == Remove) {
// Double check to make sure the directory exists. This can happen when
// we do a rm -fr on a recursively watched folder and we receive a
// modification event first but the folder has been deleted and later
// receive the delete event
if _, err := os.Lstat(event.Name); os.IsNotExist(err) {
// mark it as a delete event
event.Op |= Remove
}
}
if event.Op&Rename == Rename || event.Op&Remove == Remove {
w.Remove(event.Name)
w.mu.Lock()
delete(w.fileExists, event.Name)
w.mu.Unlock()
}
if path.isDir && event.Op&Write == Write && !(event.Op&Remove == Remove) {
w.sendDirectoryChangeEvents(event.Name)
} else {
// Send the event on the Events channel.
select {
case w.Events <- event:
case <-w.done:
break loop
}
}
if event.Op&Remove == Remove {
// Look for a file that may have overwritten this.
// For example, mv f1 f2 will delete f2, then create f2.
if path.isDir {
fileDir := filepath.Clean(event.Name)
w.mu.Lock()
_, found := w.watches[fileDir]
w.mu.Unlock()
if found {
// make sure the directory exists before we watch for changes. When we
// do a recursive watch and perform rm -fr, the parent directory might
// have gone missing, ignore the missing directory and let the
// upcoming delete event remove the watch from the parent directory.
if _, err := os.Lstat(fileDir); err == nil {
w.sendDirectoryChangeEvents(fileDir)
}
}
} else {
filePath := filepath.Clean(event.Name)
if fileInfo, err := os.Lstat(filePath); err == nil {
w.sendFileCreatedEventIfNew(filePath, fileInfo)
}
}
}
// Move to next event
kevents = kevents[1:]
}
}
// cleanup
err := unix.Close(w.kq)
if err != nil {
// only way the previous loop breaks is if w.done was closed so we need to async send to w.Errors.
select {
case w.Errors <- err:
default:
}
}
close(w.Events)
close(w.Errors)
}
// newEvent returns a platform-independent Event based on kqueue Fflags.
func newEvent(name string, mask uint32) Event {
e := Event{Name: name}
if mask&unix.NOTE_DELETE == unix.NOTE_DELETE {
e.Op |= Remove
}
if mask&unix.NOTE_WRITE == unix.NOTE_WRITE {
e.Op |= Write
}
if mask&unix.NOTE_RENAME == unix.NOTE_RENAME {
e.Op |= Rename
}
if mask&unix.NOTE_ATTRIB == unix.NOTE_ATTRIB {
e.Op |= Chmod
}
return e
}
func newCreateEvent(name string) Event {
return Event{Name: name, Op: Create}
}
// watchDirectoryFiles to mimic inotify when adding a watch on a directory
func (w *Watcher) watchDirectoryFiles(dirPath string) error {
// Get all files
files, err := ioutil.ReadDir(dirPath)
if err != nil {
return err
}
for _, fileInfo := range files {
filePath := filepath.Join(dirPath, fileInfo.Name())
filePath, err = w.internalWatch(filePath, fileInfo)
if err != nil {
return err
}
w.mu.Lock()
w.fileExists[filePath] = true
w.mu.Unlock()
}
return nil
}
// sendDirectoryChangeEvents searches the directory for newly created files
// and sends them over the event channel. This functionality is to have
// the BSD version of fsnotify match Linux inotify which provides a
// create event for files created in a watched directory.
func (w *Watcher) sendDirectoryChangeEvents(dirPath string) {
// Get all files
files, err := ioutil.ReadDir(dirPath)
if err != nil {
select {
case w.Errors <- err:
case <-w.done:
return
}
}
// Search for new files
for _, fileInfo := range files {
filePath := filepath.Join(dirPath, fileInfo.Name())
err := w.sendFileCreatedEventIfNew(filePath, fileInfo)
if err != nil {
return
}
}
}
// sendFileCreatedEventIfNew sends a create event if the file isn't already being tracked.
func (w *Watcher) sendFileCreatedEventIfNew(filePath string, fileInfo os.FileInfo) (err error) {
w.mu.Lock()
_, doesExist := w.fileExists[filePath]
w.mu.Unlock()
if !doesExist {
// Send create event
select {
case w.Events <- newCreateEvent(filePath):
case <-w.done:
return
}
}
// like watchDirectoryFiles (but without doing another ReadDir)
filePath, err = w.internalWatch(filePath, fileInfo)
if err != nil {
return err
}
w.mu.Lock()
w.fileExists[filePath] = true
w.mu.Unlock()
return nil
}
func (w *Watcher) internalWatch(name string, fileInfo os.FileInfo) (string, error) {
if fileInfo.IsDir() {
// mimic Linux providing delete events for subdirectories
// but preserve the flags used if currently watching subdirectory
w.mu.Lock()
flags := w.dirFlags[name]
w.mu.Unlock()
flags |= unix.NOTE_DELETE | unix.NOTE_RENAME
return w.addWatch(name, flags)
}
// watch file to mimic Linux inotify
return w.addWatch(name, noteAllEvents)
}
// kqueue creates a new kernel event queue and returns a descriptor.
func kqueue() (kq int, err error) {
kq, err = unix.Kqueue()
if kq == -1 {
return kq, err
}
return kq, nil
}
// register events with the queue
func register(kq int, fds []int, flags int, fflags uint32) error {
changes := make([]unix.Kevent_t, len(fds))
for i, fd := range fds {
// SetKevent converts int to the platform-specific types:
unix.SetKevent(&changes[i], fd, unix.EVFILT_VNODE, flags)
changes[i].Fflags = fflags
}
// register the events
success, err := unix.Kevent(kq, changes, nil, nil)
if success == -1 {
return err
}
return nil
}
// read retrieves pending events, or waits until an event occurs.
// A timeout of nil blocks indefinitely, while 0 polls the queue.
func read(kq int, events []unix.Kevent_t, timeout *unix.Timespec) ([]unix.Kevent_t, error) {
n, err := unix.Kevent(kq, nil, events, timeout)
if err != nil {
return nil, err
}
return events[0:n], nil
}
// durationToTimespec prepares a timeout value
func durationToTimespec(d time.Duration) unix.Timespec {
return unix.NsecToTimespec(d.Nanoseconds())
}

vendor/github.com/fsnotify/fsnotify/open_mode_bsd.go generated vendored Normal file

@@ -0,0 +1,11 @@
// Copyright 2013 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build freebsd openbsd netbsd dragonfly
package fsnotify
import "golang.org/x/sys/unix"
const openMode = unix.O_NONBLOCK | unix.O_RDONLY | unix.O_CLOEXEC

vendor/github.com/fsnotify/fsnotify/open_mode_darwin.go generated vendored Normal file

@@ -0,0 +1,12 @@
// Copyright 2013 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build darwin
package fsnotify
import "golang.org/x/sys/unix"
// note: this constant is not defined on BSD
const openMode = unix.O_EVTONLY | unix.O_CLOEXEC

vendor/github.com/fsnotify/fsnotify/windows.go generated vendored Normal file

@@ -0,0 +1,561 @@
// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build windows
package fsnotify
import (
"errors"
"fmt"
"os"
"path/filepath"
"runtime"
"sync"
"syscall"
"unsafe"
)
// Watcher watches a set of files, delivering events to a channel.
type Watcher struct {
Events chan Event
Errors chan error
isClosed bool // Set to true when Close() is first called
mu sync.Mutex // Map access
port syscall.Handle // Handle to completion port
watches watchMap // Map of watches (key: i-number)
input chan *input // Inputs to the reader are sent on this channel
quit chan chan<- error
}
// NewWatcher establishes a new watcher with the underlying OS and begins waiting for events.
func NewWatcher() (*Watcher, error) {
port, e := syscall.CreateIoCompletionPort(syscall.InvalidHandle, 0, 0, 0)
if e != nil {
return nil, os.NewSyscallError("CreateIoCompletionPort", e)
}
w := &Watcher{
port: port,
watches: make(watchMap),
input: make(chan *input, 1),
Events: make(chan Event, 50),
Errors: make(chan error),
quit: make(chan chan<- error, 1),
}
go w.readEvents()
return w, nil
}
// Close removes all watches and closes the events channel.
func (w *Watcher) Close() error {
if w.isClosed {
return nil
}
w.isClosed = true
// Send "quit" message to the reader goroutine
ch := make(chan error)
w.quit <- ch
if err := w.wakeupReader(); err != nil {
return err
}
return <-ch
}
// Add starts watching the named file or directory (non-recursively).
func (w *Watcher) Add(name string) error {
if w.isClosed {
return errors.New("watcher already closed")
}
in := &input{
op: opAddWatch,
path: filepath.Clean(name),
flags: sysFSALLEVENTS,
reply: make(chan error),
}
w.input <- in
if err := w.wakeupReader(); err != nil {
return err
}
return <-in.reply
}
// Remove stops watching the named file or directory (non-recursively).
func (w *Watcher) Remove(name string) error {
in := &input{
op: opRemoveWatch,
path: filepath.Clean(name),
reply: make(chan error),
}
w.input <- in
if err := w.wakeupReader(); err != nil {
return err
}
return <-in.reply
}
const (
// Options for AddWatch
sysFSONESHOT = 0x80000000
sysFSONLYDIR = 0x1000000
// Events
sysFSACCESS = 0x1
sysFSALLEVENTS = 0xfff
sysFSATTRIB = 0x4
sysFSCLOSE = 0x18
sysFSCREATE = 0x100
sysFSDELETE = 0x200
sysFSDELETESELF = 0x400
sysFSMODIFY = 0x2
sysFSMOVE = 0xc0
sysFSMOVEDFROM = 0x40
sysFSMOVEDTO = 0x80
sysFSMOVESELF = 0x800
// Special events
sysFSIGNORED = 0x8000
sysFSQOVERFLOW = 0x4000
)
func newEvent(name string, mask uint32) Event {
e := Event{Name: name}
if mask&sysFSCREATE == sysFSCREATE || mask&sysFSMOVEDTO == sysFSMOVEDTO {
e.Op |= Create
}
if mask&sysFSDELETE == sysFSDELETE || mask&sysFSDELETESELF == sysFSDELETESELF {
e.Op |= Remove
}
if mask&sysFSMODIFY == sysFSMODIFY {
e.Op |= Write
}
if mask&sysFSMOVE == sysFSMOVE || mask&sysFSMOVESELF == sysFSMOVESELF || mask&sysFSMOVEDFROM == sysFSMOVEDFROM {
e.Op |= Rename
}
if mask&sysFSATTRIB == sysFSATTRIB {
e.Op |= Chmod
}
return e
}
const (
opAddWatch = iota
opRemoveWatch
)
const (
provisional uint64 = 1 << (32 + iota)
)
type input struct {
op int
path string
flags uint32
reply chan error
}
type inode struct {
handle syscall.Handle
volume uint32
index uint64
}
type watch struct {
ov syscall.Overlapped
ino *inode // i-number
path string // Directory path
mask uint64 // Directory itself is being watched with these notify flags
names map[string]uint64 // Map of names being watched and their notify flags
rename string // Remembers the old name while renaming a file
buf [4096]byte
}
type indexMap map[uint64]*watch
type watchMap map[uint32]indexMap
func (w *Watcher) wakeupReader() error {
e := syscall.PostQueuedCompletionStatus(w.port, 0, 0, nil)
if e != nil {
return os.NewSyscallError("PostQueuedCompletionStatus", e)
}
return nil
}
func getDir(pathname string) (dir string, err error) {
attr, e := syscall.GetFileAttributes(syscall.StringToUTF16Ptr(pathname))
if e != nil {
return "", os.NewSyscallError("GetFileAttributes", e)
}
if attr&syscall.FILE_ATTRIBUTE_DIRECTORY != 0 {
dir = pathname
} else {
dir, _ = filepath.Split(pathname)
dir = filepath.Clean(dir)
}
return
}
func getIno(path string) (ino *inode, err error) {
h, e := syscall.CreateFile(syscall.StringToUTF16Ptr(path),
syscall.FILE_LIST_DIRECTORY,
syscall.FILE_SHARE_READ|syscall.FILE_SHARE_WRITE|syscall.FILE_SHARE_DELETE,
nil, syscall.OPEN_EXISTING,
syscall.FILE_FLAG_BACKUP_SEMANTICS|syscall.FILE_FLAG_OVERLAPPED, 0)
if e != nil {
return nil, os.NewSyscallError("CreateFile", e)
}
var fi syscall.ByHandleFileInformation
if e = syscall.GetFileInformationByHandle(h, &fi); e != nil {
syscall.CloseHandle(h)
return nil, os.NewSyscallError("GetFileInformationByHandle", e)
}
ino = &inode{
handle: h,
volume: fi.VolumeSerialNumber,
index: uint64(fi.FileIndexHigh)<<32 | uint64(fi.FileIndexLow),
}
return ino, nil
}
// Must run within the I/O thread.
func (m watchMap) get(ino *inode) *watch {
if i := m[ino.volume]; i != nil {
return i[ino.index]
}
return nil
}
// Must run within the I/O thread.
func (m watchMap) set(ino *inode, watch *watch) {
i := m[ino.volume]
if i == nil {
i = make(indexMap)
m[ino.volume] = i
}
i[ino.index] = watch
}
// Must run within the I/O thread.
func (w *Watcher) addWatch(pathname string, flags uint64) error {
dir, err := getDir(pathname)
if err != nil {
return err
}
if flags&sysFSONLYDIR != 0 && pathname != dir {
return nil
}
ino, err := getIno(dir)
if err != nil {
return err
}
w.mu.Lock()
watchEntry := w.watches.get(ino)
w.mu.Unlock()
if watchEntry == nil {
if _, e := syscall.CreateIoCompletionPort(ino.handle, w.port, 0, 0); e != nil {
syscall.CloseHandle(ino.handle)
return os.NewSyscallError("CreateIoCompletionPort", e)
}
watchEntry = &watch{
ino: ino,
path: dir,
names: make(map[string]uint64),
}
w.mu.Lock()
w.watches.set(ino, watchEntry)
w.mu.Unlock()
flags |= provisional
} else {
syscall.CloseHandle(ino.handle)
}
if pathname == dir {
watchEntry.mask |= flags
} else {
watchEntry.names[filepath.Base(pathname)] |= flags
}
if err = w.startRead(watchEntry); err != nil {
return err
}
if pathname == dir {
watchEntry.mask &= ^provisional
} else {
watchEntry.names[filepath.Base(pathname)] &= ^provisional
}
return nil
}
// Must run within the I/O thread.
func (w *Watcher) remWatch(pathname string) error {
dir, err := getDir(pathname)
if err != nil {
return err
}
ino, err := getIno(dir)
if err != nil {
return err
}
w.mu.Lock()
watch := w.watches.get(ino)
w.mu.Unlock()
if watch == nil {
return fmt.Errorf("can't remove non-existent watch for: %s", pathname)
}
if pathname == dir {
w.sendEvent(watch.path, watch.mask&sysFSIGNORED)
watch.mask = 0
} else {
name := filepath.Base(pathname)
w.sendEvent(filepath.Join(watch.path, name), watch.names[name]&sysFSIGNORED)
delete(watch.names, name)
}
return w.startRead(watch)
}
// Must run within the I/O thread.
func (w *Watcher) deleteWatch(watch *watch) {
for name, mask := range watch.names {
if mask&provisional == 0 {
w.sendEvent(filepath.Join(watch.path, name), mask&sysFSIGNORED)
}
delete(watch.names, name)
}
if watch.mask != 0 {
if watch.mask&provisional == 0 {
w.sendEvent(watch.path, watch.mask&sysFSIGNORED)
}
watch.mask = 0
}
}
// Must run within the I/O thread.
func (w *Watcher) startRead(watch *watch) error {
if e := syscall.CancelIo(watch.ino.handle); e != nil {
w.Errors <- os.NewSyscallError("CancelIo", e)
w.deleteWatch(watch)
}
mask := toWindowsFlags(watch.mask)
for _, m := range watch.names {
mask |= toWindowsFlags(m)
}
if mask == 0 {
if e := syscall.CloseHandle(watch.ino.handle); e != nil {
w.Errors <- os.NewSyscallError("CloseHandle", e)
}
w.mu.Lock()
delete(w.watches[watch.ino.volume], watch.ino.index)
w.mu.Unlock()
return nil
}
e := syscall.ReadDirectoryChanges(watch.ino.handle, &watch.buf[0],
uint32(unsafe.Sizeof(watch.buf)), false, mask, nil, &watch.ov, 0)
if e != nil {
err := os.NewSyscallError("ReadDirectoryChanges", e)
if e == syscall.ERROR_ACCESS_DENIED && watch.mask&provisional == 0 {
// Watched directory was probably removed
if w.sendEvent(watch.path, watch.mask&sysFSDELETESELF) {
if watch.mask&sysFSONESHOT != 0 {
watch.mask = 0
}
}
err = nil
}
w.deleteWatch(watch)
w.startRead(watch)
return err
}
return nil
}
// readEvents reads from the I/O completion port, converts the
// received events into Event objects and sends them via the Events channel.
// Entry point to the I/O thread.
func (w *Watcher) readEvents() {
var (
n, key uint32
ov *syscall.Overlapped
)
runtime.LockOSThread()
for {
e := syscall.GetQueuedCompletionStatus(w.port, &n, &key, &ov, syscall.INFINITE)
watch := (*watch)(unsafe.Pointer(ov))
if watch == nil {
select {
case ch := <-w.quit:
w.mu.Lock()
var indexes []indexMap
for _, index := range w.watches {
indexes = append(indexes, index)
}
w.mu.Unlock()
for _, index := range indexes {
for _, watch := range index {
w.deleteWatch(watch)
w.startRead(watch)
}
}
var err error
if e := syscall.CloseHandle(w.port); e != nil {
err = os.NewSyscallError("CloseHandle", e)
}
close(w.Events)
close(w.Errors)
ch <- err
return
case in := <-w.input:
switch in.op {
case opAddWatch:
in.reply <- w.addWatch(in.path, uint64(in.flags))
case opRemoveWatch:
in.reply <- w.remWatch(in.path)
}
default:
}
continue
}
switch e {
case syscall.ERROR_MORE_DATA:
if watch == nil {
w.Errors <- errors.New("ERROR_MORE_DATA has unexpectedly null lpOverlapped buffer")
} else {
// The i/o succeeded but the buffer is full.
// In theory we should be building up a full packet.
// In practice we can get away with just carrying on.
n = uint32(unsafe.Sizeof(watch.buf))
}
case syscall.ERROR_ACCESS_DENIED:
// Watched directory was probably removed
w.sendEvent(watch.path, watch.mask&sysFSDELETESELF)
w.deleteWatch(watch)
w.startRead(watch)
continue
case syscall.ERROR_OPERATION_ABORTED:
// CancelIo was called on this handle
continue
default:
w.Errors <- os.NewSyscallError("GetQueuedCompletionPort", e)
continue
case nil:
}
var offset uint32
for {
if n == 0 {
w.Events <- newEvent("", sysFSQOVERFLOW)
w.Errors <- errors.New("short read in readEvents()")
break
}
// Point "raw" to the event in the buffer
raw := (*syscall.FileNotifyInformation)(unsafe.Pointer(&watch.buf[offset]))
buf := (*[syscall.MAX_PATH]uint16)(unsafe.Pointer(&raw.FileName))
name := syscall.UTF16ToString(buf[:raw.FileNameLength/2])
fullname := filepath.Join(watch.path, name)
var mask uint64
switch raw.Action {
case syscall.FILE_ACTION_REMOVED:
mask = sysFSDELETESELF
case syscall.FILE_ACTION_MODIFIED:
mask = sysFSMODIFY
case syscall.FILE_ACTION_RENAMED_OLD_NAME:
watch.rename = name
case syscall.FILE_ACTION_RENAMED_NEW_NAME:
if watch.names[watch.rename] != 0 {
watch.names[name] |= watch.names[watch.rename]
delete(watch.names, watch.rename)
mask = sysFSMOVESELF
}
}
sendNameEvent := func() {
if w.sendEvent(fullname, watch.names[name]&mask) {
if watch.names[name]&sysFSONESHOT != 0 {
delete(watch.names, name)
}
}
}
if raw.Action != syscall.FILE_ACTION_RENAMED_NEW_NAME {
sendNameEvent()
}
if raw.Action == syscall.FILE_ACTION_REMOVED {
w.sendEvent(fullname, watch.names[name]&sysFSIGNORED)
delete(watch.names, name)
}
if w.sendEvent(fullname, watch.mask&toFSnotifyFlags(raw.Action)) {
if watch.mask&sysFSONESHOT != 0 {
watch.mask = 0
}
}
if raw.Action == syscall.FILE_ACTION_RENAMED_NEW_NAME {
fullname = filepath.Join(watch.path, watch.rename)
sendNameEvent()
}
// Move to the next event in the buffer
if raw.NextEntryOffset == 0 {
break
}
offset += raw.NextEntryOffset
// Error!
if offset >= n {
w.Errors <- errors.New("Windows system assumed buffer larger than it is, events have likely been missed.")
break
}
}
if err := w.startRead(watch); err != nil {
w.Errors <- err
}
}
}
func (w *Watcher) sendEvent(name string, mask uint64) bool {
if mask == 0 {
return false
}
event := newEvent(name, uint32(mask))
select {
case ch := <-w.quit:
w.quit <- ch
case w.Events <- event:
}
return true
}
func toWindowsFlags(mask uint64) uint32 {
var m uint32
if mask&sysFSACCESS != 0 {
m |= syscall.FILE_NOTIFY_CHANGE_LAST_ACCESS
}
if mask&sysFSMODIFY != 0 {
m |= syscall.FILE_NOTIFY_CHANGE_LAST_WRITE
}
if mask&sysFSATTRIB != 0 {
m |= syscall.FILE_NOTIFY_CHANGE_ATTRIBUTES
}
if mask&(sysFSMOVE|sysFSCREATE|sysFSDELETE) != 0 {
m |= syscall.FILE_NOTIFY_CHANGE_FILE_NAME | syscall.FILE_NOTIFY_CHANGE_DIR_NAME
}
return m
}
func toFSnotifyFlags(action uint32) uint64 {
switch action {
case syscall.FILE_ACTION_ADDED:
return sysFSCREATE
case syscall.FILE_ACTION_REMOVED:
return sysFSDELETE
case syscall.FILE_ACTION_MODIFIED:
return sysFSMODIFY
case syscall.FILE_ACTION_RENAMED_OLD_NAME:
return sysFSMOVEDFROM
case syscall.FILE_ACTION_RENAMED_NEW_NAME:
return sysFSMOVEDTO
}
return 0
}

File diff suppressed because it is too large

vendor/k8s.io/api/core/v1/generated.proto generated vendored

@@ -689,7 +689,6 @@ message Container {
repeated VolumeMount volumeMounts = 9;
// volumeDevices is the list of block devices to be used by the container.
// This is a beta feature.
// +patchMergeKey=devicePath
// +patchStrategy=merge
// +optional
@@ -1276,7 +1275,6 @@ message EphemeralContainerCommon {
repeated VolumeMount volumeMounts = 9;
// volumeDevices is the list of block devices to be used by the container.
// This is a beta feature.
// +patchMergeKey=devicePath
// +patchStrategy=merge
// +optional
@@ -2636,15 +2634,18 @@ message PersistentVolumeClaimSpec {
// volumeMode defines what type of volume is required by the claim.
// Value of Filesystem is implied when not included in claim spec.
// This is a beta feature.
// +optional
optional string volumeMode = 6;
// This field requires the VolumeSnapshotDataSource alpha feature gate to be
// enabled and currently VolumeSnapshot is the only supported data source.
// If the provisioner can support VolumeSnapshot data source, it will create
// a new volume and data will be restored to the volume at the same time.
// If the provisioner does not support VolumeSnapshot data source, volume will
// This field can be used to specify either:
// * An existing VolumeSnapshot object (snapshot.storage.k8s.io/VolumeSnapshot - Beta)
// * An existing PVC (PersistentVolumeClaim)
// * An existing custom resource/object that implements data population (Alpha)
// In order to use VolumeSnapshot object types, the appropriate feature gate
// must be enabled (VolumeSnapshotDataSource or AnyVolumeDataSource)
// If the provisioner or an external controller can support the specified data source,
// it will create a new volume based on the contents of the specified data source.
// If the specified data source is not supported, the volume will
// not be created and the failure will be reported as an event.
// In the future, we plan to support more data source types and the behavior
// of the provisioner may change.
@@ -2852,7 +2853,6 @@ message PersistentVolumeSpec {
// volumeMode defines if a volume is intended to be used with a formatted filesystem
// or to remain in raw block state. Value of Filesystem is implied when not included in spec.
// This is a beta feature.
// +optional
optional string volumeMode = 8;
@@ -3278,6 +3278,15 @@ message PodSecurityContext {
// sysctls (by the container runtime) might fail to launch.
// +optional
repeated Sysctl sysctls = 7;
// fsGroupChangePolicy defines behavior of changing ownership and permission of the volume
// before being exposed inside Pod. This field will only apply to
// volume types which support fsGroup based ownership(and permissions).
// It will have no effect on ephemeral volume types such as: secret, configmaps
// and emptydir.
// Valid values are "OnRootMismatch" and "Always". If not specified defaults to "Always".
// +optional
optional string fsGroupChangePolicy = 9;
}
// Describes the class of pods that should avoid this node.
@@ -4912,7 +4921,7 @@ message Taint {
// Required. The taint key to be applied to a node.
optional string key = 1;
// Required. The taint value corresponding to the taint key.
// The taint value corresponding to the taint key.
// +optional
optional string value = 2;
@@ -5304,14 +5313,12 @@ message WeightedPodAffinityTerm {
// WindowsSecurityContextOptions contain Windows-specific options and credentials.
message WindowsSecurityContextOptions {
// GMSACredentialSpecName is the name of the GMSA credential spec to use.
// This field is alpha-level and is only honored by servers that enable the WindowsGMSA feature flag.
// +optional
optional string gmsaCredentialSpecName = 1;
// GMSACredentialSpec is where the GMSA admission webhook
// (https://github.com/kubernetes-sigs/windows-gmsa) inlines the contents of the
// GMSA credential spec named by the GMSACredentialSpecName field.
// This field is alpha-level and is only honored by servers that enable the WindowsGMSA feature flag.
// +optional
optional string gmsaCredentialSpec = 2;
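
The dataSource documentation above now covers VolumeSnapshot, PVC, and custom populator sources. Below is a hedged sketch (not part of this diff) of a PVC restored from a VolumeSnapshot, built with the Go types from k8s.io/api and k8s.io/apimachinery; the names "restored-data" and "my-snapshot" are illustrative, and the relevant feature gates and snapshot CRDs are assumed to be enabled on the cluster.

```go
package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/resource"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
	apiGroup := "snapshot.storage.k8s.io" // API group of the VolumeSnapshot CRD

	pvc := corev1.PersistentVolumeClaim{
		ObjectMeta: metav1.ObjectMeta{Name: "restored-data"}, // hypothetical name
		Spec: corev1.PersistentVolumeClaimSpec{
			AccessModes: []corev1.PersistentVolumeAccessMode{corev1.ReadWriteOnce},
			Resources: corev1.ResourceRequirements{
				Requests: corev1.ResourceList{
					corev1.ResourceStorage: resource.MustParse("1Gi"),
				},
			},
			// DataSource asks the provisioner to populate the new volume
			// from an existing snapshot instead of creating it empty.
			DataSource: &corev1.TypedLocalObjectReference{
				APIGroup: &apiGroup,
				Kind:     "VolumeSnapshot",
				Name:     "my-snapshot", // hypothetical snapshot name
			},
		},
	}
	fmt.Println(pvc.Name, pvc.Spec.DataSource.Kind)
}
```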

vendor/k8s.io/api/core/v1/types.go generated vendored

@@ -331,7 +331,6 @@ type PersistentVolumeSpec struct {
MountOptions []string `json:"mountOptions,omitempty" protobuf:"bytes,7,opt,name=mountOptions"`
// volumeMode defines if a volume is intended to be used with a formatted filesystem
// or to remain in raw block state. Value of Filesystem is implied when not included in spec.
// This is a beta feature.
// +optional
VolumeMode *PersistentVolumeMode `json:"volumeMode,omitempty" protobuf:"bytes,8,opt,name=volumeMode,casttype=PersistentVolumeMode"`
// NodeAffinity defines constraints that limit what nodes this volume can be accessed from.
@@ -460,14 +459,17 @@ type PersistentVolumeClaimSpec struct {
StorageClassName *string `json:"storageClassName,omitempty" protobuf:"bytes,5,opt,name=storageClassName"`
// volumeMode defines what type of volume is required by the claim.
// Value of Filesystem is implied when not included in claim spec.
// This is a beta feature.
// +optional
VolumeMode *PersistentVolumeMode `json:"volumeMode,omitempty" protobuf:"bytes,6,opt,name=volumeMode,casttype=PersistentVolumeMode"`
// This field requires the VolumeSnapshotDataSource alpha feature gate to be
// enabled and currently VolumeSnapshot is the only supported data source.
// If the provisioner can support VolumeSnapshot data source, it will create
// a new volume and data will be restored to the volume at the same time.
// If the provisioner does not support VolumeSnapshot data source, volume will
// This field can be used to specify either:
// * An existing VolumeSnapshot object (snapshot.storage.k8s.io/VolumeSnapshot - Beta)
// * An existing PVC (PersistentVolumeClaim)
// * An existing custom resource/object that implements data population (Alpha)
// In order to use VolumeSnapshot object types, the appropriate feature gate
// must be enabled (VolumeSnapshotDataSource or AnyVolumeDataSource)
// If the provisioner or an external controller can support the specified data source,
// it will create a new volume based on the contents of the specified data source.
// If the specified data source is not supported, the volume will
// not be created and the failure will be reported as an event.
// In the future, we plan to support more data source types and the behavior
// of the provisioner may change.
@@ -2181,7 +2183,6 @@ type Container struct {
// +patchStrategy=merge
VolumeMounts []VolumeMount `json:"volumeMounts,omitempty" patchStrategy:"merge" patchMergeKey:"mountPath" protobuf:"bytes,9,rep,name=volumeMounts"`
// volumeDevices is the list of block devices to be used by the container.
// This is a beta feature.
// +patchMergeKey=devicePath
// +patchStrategy=merge
// +optional
@@ -2751,7 +2752,7 @@ type PreferredSchedulingTerm struct {
type Taint struct {
// Required. The taint key to be applied to a node.
Key string `json:"key" protobuf:"bytes,1,opt,name=key"`
// Required. The taint value corresponding to the taint key.
// The taint value corresponding to the taint key.
// +optional
Value string `json:"value,omitempty" protobuf:"bytes,2,opt,name=value"`
// Required. The effect of the taint on pods
@@ -3125,6 +3126,22 @@ type HostAlias struct {
Hostnames []string `json:"hostnames,omitempty" protobuf:"bytes,2,rep,name=hostnames"`
}
// PodFSGroupChangePolicy holds policies that will be used for applying fsGroup to a volume
// when volume is mounted.
type PodFSGroupChangePolicy string
const (
// FSGroupChangeOnRootMismatch indicates that volume's ownership and permissions will be changed
// only when permission and ownership of root directory does not match with expected
// permissions on the volume. This can help shorten the time it takes to change
// ownership and permissions of a volume.
FSGroupChangeOnRootMismatch PodFSGroupChangePolicy = "OnRootMismatch"
// FSGroupChangeAlways indicates that volume's ownership and permissions
// should always be changed whenever volume is mounted inside a Pod. This is the default
// behavior.
FSGroupChangeAlways PodFSGroupChangePolicy = "Always"
)
// PodSecurityContext holds pod-level security attributes and common container settings.
// Some fields are also present in container.securityContext. Field values of
// container.securityContext take precedence over field values of PodSecurityContext.
@@ -3183,6 +3200,14 @@ type PodSecurityContext struct {
// sysctls (by the container runtime) might fail to launch.
// +optional
Sysctls []Sysctl `json:"sysctls,omitempty" protobuf:"bytes,7,rep,name=sysctls"`
// fsGroupChangePolicy defines behavior of changing ownership and permission of the volume
// before being exposed inside Pod. This field will only apply to
// volume types which support fsGroup based ownership(and permissions).
// It will have no effect on ephemeral volume types such as: secret, configmaps
// and emptydir.
// Valid values are "OnRootMismatch" and "Always". If not specified defaults to "Always".
// +optional
FSGroupChangePolicy *PodFSGroupChangePolicy `json:"fsGroupChangePolicy,omitempty" protobuf:"bytes,9,opt,name=fsGroupChangePolicy"`
}
// PodQOSClass defines the supported qos classes of Pods.
@@ -3298,7 +3323,6 @@ type EphemeralContainerCommon struct {
// +patchStrategy=merge
VolumeMounts []VolumeMount `json:"volumeMounts,omitempty" patchStrategy:"merge" patchMergeKey:"mountPath" protobuf:"bytes,9,rep,name=volumeMounts"`
// volumeDevices is the list of block devices to be used by the container.
// This is a beta feature.
// +patchMergeKey=devicePath
// +patchStrategy=merge
// +optional
@@ -5843,14 +5867,12 @@ type SELinuxOptions struct {
// WindowsSecurityContextOptions contain Windows-specific options and credentials.
type WindowsSecurityContextOptions struct {
// GMSACredentialSpecName is the name of the GMSA credential spec to use.
// This field is alpha-level and is only honored by servers that enable the WindowsGMSA feature flag.
// +optional
GMSACredentialSpecName *string `json:"gmsaCredentialSpecName,omitempty" protobuf:"bytes,1,opt,name=gmsaCredentialSpecName"`
// GMSACredentialSpec is where the GMSA admission webhook
// (https://github.com/kubernetes-sigs/windows-gmsa) inlines the contents of the
// GMSA credential spec named by the GMSACredentialSpecName field.
// This field is alpha-level and is only honored by servers that enable the WindowsGMSA feature flag.
// +optional
GMSACredentialSpec *string `json:"gmsaCredentialSpec,omitempty" protobuf:"bytes,2,opt,name=gmsaCredentialSpec"`
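
Not part of the diff: a short sketch of how the new FSGroupChangePolicy field and its constants (added in types.go above) might be set from Go client code. The fsGroup value 2000 is only an example.

```go
package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
)

func main() {
	fsGroup := int64(2000)                        // example supplemental group
	policy := corev1.FSGroupChangeOnRootMismatch // skip the recursive chown/chmod when root ownership already matches

	sc := corev1.PodSecurityContext{
		FSGroup: &fsGroup,
		// Leaving this nil means the default, which the API docs above describe as "Always".
		FSGroupChangePolicy: &policy,
	}
	fmt.Println(*sc.FSGroupChangePolicy)
}
```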

vendor/k8s.io/api/core/v1/types_swagger_doc_generated.go generated vendored

@@ -336,7 +336,7 @@ var map_Container = map[string]string{
"env": "List of environment variables to set in the container. Cannot be updated.",
"resources": "Compute Resources required by this container. Cannot be updated. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/",
"volumeMounts": "Pod volumes to mount into the container's filesystem. Cannot be updated.",
"volumeDevices": "volumeDevices is the list of block devices to be used by the container. This is a beta feature.",
"volumeDevices": "volumeDevices is the list of block devices to be used by the container.",
"livenessProbe": "Periodic probe of container liveness. Container will be restarted if the probe fails. Cannot be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes",
"readinessProbe": "Periodic probe of container service readiness. Container will be removed from service endpoints if the probe fails. Cannot be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes",
"startupProbe": "StartupProbe indicates that the Pod has successfully initialized. If specified, no other probes are executed until this completes successfully. If this probe fails, the Pod will be restarted, just as if the livenessProbe failed. This can be used to provide different probe parameters at the beginning of a Pod's lifecycle, when it might take a long time to load data or warm a cache, than during steady-state operation. This cannot be updated. This is a beta feature enabled by the StartupProbe feature flag. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes",
@@ -599,7 +599,7 @@ var map_EphemeralContainerCommon = map[string]string{
"env": "List of environment variables to set in the container. Cannot be updated.",
"resources": "Resources are not allowed for ephemeral containers. Ephemeral containers use spare resources already allocated to the pod.",
"volumeMounts": "Pod volumes to mount into the container's filesystem. Cannot be updated.",
"volumeDevices": "volumeDevices is the list of block devices to be used by the container. This is a beta feature.",
"volumeDevices": "volumeDevices is the list of block devices to be used by the container.",
"livenessProbe": "Probes are not allowed for ephemeral containers.",
"readinessProbe": "Probes are not allowed for ephemeral containers.",
"startupProbe": "Probes are not allowed for ephemeral containers.",
@@ -1300,8 +1300,8 @@ var map_PersistentVolumeClaimSpec = map[string]string{
"resources": "Resources represents the minimum resources the volume should have. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#resources",
"volumeName": "VolumeName is the binding reference to the PersistentVolume backing this claim.",
"storageClassName": "Name of the StorageClass required by the claim. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#class-1",
"volumeMode": "volumeMode defines what type of volume is required by the claim. Value of Filesystem is implied when not included in claim spec. This is a beta feature.",
"dataSource": "This field requires the VolumeSnapshotDataSource alpha feature gate to be enabled and currently VolumeSnapshot is the only supported data source. If the provisioner can support VolumeSnapshot data source, it will create a new volume and data will be restored to the volume at the same time. If the provisioner does not support VolumeSnapshot data source, volume will not be created and the failure will be reported as an event. In the future, we plan to support more data source types and the behavior of the provisioner may change.",
"volumeMode": "volumeMode defines what type of volume is required by the claim. Value of Filesystem is implied when not included in claim spec.",
"dataSource": "This field can be used to specify either: * An existing VolumeSnapshot object (snapshot.storage.k8s.io/VolumeSnapshot - Beta) * An existing PVC (PersistentVolumeClaim) * An existing custom resource/object that implements data population (Alpha) In order to use VolumeSnapshot object types, the appropriate feature gate must be enabled (VolumeSnapshotDataSource or AnyVolumeDataSource) If the provisioner or an external controller can support the specified data source, it will create a new volume based on the contents of the specified data source. If the specified data source is not supported, the volume will not be created and the failure will be reported as an event. In the future, we plan to support more data source types and the behavior of the provisioner may change.",
}
func (PersistentVolumeClaimSpec) SwaggerDoc() map[string]string {
@@ -1378,7 +1378,7 @@ var map_PersistentVolumeSpec = map[string]string{
"persistentVolumeReclaimPolicy": "What happens to a persistent volume when released from its claim. Valid options are Retain (default for manually created PersistentVolumes), Delete (default for dynamically provisioned PersistentVolumes), and Recycle (deprecated). Recycle must be supported by the volume plugin underlying this PersistentVolume. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#reclaiming",
"storageClassName": "Name of StorageClass to which this persistent volume belongs. Empty value means that this volume does not belong to any StorageClass.",
"mountOptions": "A list of mount options, e.g. [\"ro\", \"soft\"]. Not validated - mount will simply fail if one is invalid. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes/#mount-options",
"volumeMode": "volumeMode defines if a volume is intended to be used with a formatted filesystem or to remain in raw block state. Value of Filesystem is implied when not included in spec. This is a beta feature.",
"volumeMode": "volumeMode defines if a volume is intended to be used with a formatted filesystem or to remain in raw block state. Value of Filesystem is implied when not included in spec.",
"nodeAffinity": "NodeAffinity defines constraints that limit what nodes this volume can be accessed from. This field influences the scheduling of pods that use this volume.",
}
@@ -1583,6 +1583,7 @@ var map_PodSecurityContext = map[string]string{
"supplementalGroups": "A list of groups applied to the first process run in each container, in addition to the container's primary GID. If unspecified, no groups will be added to any container.",
"fsGroup": "A special supplemental group that applies to all containers in a pod. Some volume types allow the Kubelet to change the ownership of that volume to be owned by the pod:\n\n1. The owning GID will be the FSGroup 2. The setgid bit is set (new files created in the volume will be owned by FSGroup) 3. The permission bits are OR'd with rw-rw ",
"sysctls": "Sysctls hold a list of namespaced sysctls used for the pod. Pods with unsupported sysctls (by the container runtime) might fail to launch.",
"fsGroupChangePolicy": "fsGroupChangePolicy defines behavior of changing ownership and permission of the volume before being exposed inside Pod. This field will only apply to volume types which support fsGroup based ownership(and permissions). It will have no effect on ephemeral volume types such as: secret, configmaps and emptydir. Valid values are \"OnRootMismatch\" and \"Always\". If not specified defaults to \"Always\".",
}
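As an editor-added illustration only (not part of the vendored change), the new field can be set on a pod's security context as below; the type name PodFSGroupChangePolicy comes from the generated deepcopy hunk later in this diff, and the string value from the description above. Assumes k8s.io/api/core/v1 is imported as corev1.
	// Minimal sketch: opt volumes into on-mismatch ownership handling.
	policy := corev1.PodFSGroupChangePolicy("OnRootMismatch")
	fsGroup := int64(2000)
	sc := &corev1.PodSecurityContext{
		FSGroup:             &fsGroup, // supplemental group applied to volumes
		FSGroupChangePolicy: &policy,  // only re-chown when the volume root mismatches
	}
	_ = sc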
func (PodSecurityContext) SwaggerDoc() map[string]string {
@ -2282,7 +2283,7 @@ func (TCPSocketAction) SwaggerDoc() map[string]string {
var map_Taint = map[string]string{
"": "The node this Taint is attached to has the \"effect\" on any pod that does not tolerate the Taint.",
"key": "Required. The taint key to be applied to a node.",
"value": "Required. The taint value corresponding to the taint key.",
"value": "The taint value corresponding to the taint key.",
"effect": "Required. The effect of the taint on pods that do not tolerate the taint. Valid effects are NoSchedule, PreferNoSchedule and NoExecute.",
"timeAdded": "TimeAdded represents the time at which the taint was added. It is only written for NoExecute taints.",
}
@ -2460,8 +2461,8 @@ func (WeightedPodAffinityTerm) SwaggerDoc() map[string]string {
var map_WindowsSecurityContextOptions = map[string]string{
"": "WindowsSecurityContextOptions contain Windows-specific options and credentials.",
"gmsaCredentialSpecName": "GMSACredentialSpecName is the name of the GMSA credential spec to use. This field is alpha-level and is only honored by servers that enable the WindowsGMSA feature flag.",
"gmsaCredentialSpec": "GMSACredentialSpec is where the GMSA admission webhook (https://github.com/kubernetes-sigs/windows-gmsa) inlines the contents of the GMSA credential spec named by the GMSACredentialSpecName field. This field is alpha-level and is only honored by servers that enable the WindowsGMSA feature flag.",
"gmsaCredentialSpecName": "GMSACredentialSpecName is the name of the GMSA credential spec to use.",
"gmsaCredentialSpec": "GMSACredentialSpec is where the GMSA admission webhook (https://github.com/kubernetes-sigs/windows-gmsa) inlines the contents of the GMSA credential spec named by the GMSACredentialSpecName field.",
"runAsUserName": "The UserName in Windows to run the entrypoint of the container process. Defaults to the user specified in image metadata if unspecified. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence.",
}

View File

@ -18,38 +18,31 @@ package v1
const (
// TaintNodeNotReady will be added when node is not ready
// and feature-gate for TaintBasedEvictions flag is enabled,
// and removed when node becomes ready.
TaintNodeNotReady = "node.kubernetes.io/not-ready"
// TaintNodeUnreachable will be added when node becomes unreachable
// (corresponding to NodeReady status ConditionUnknown)
// and feature-gate for TaintBasedEvictions flag is enabled,
// and removed when node becomes reachable (NodeReady status ConditionTrue).
TaintNodeUnreachable = "node.kubernetes.io/unreachable"
// TaintNodeUnschedulable will be added when node becomes unschedulable
// and feature-gate for TaintNodesByCondition flag is enabled,
// and removed when node becomes schedulable.
TaintNodeUnschedulable = "node.kubernetes.io/unschedulable"
// TaintNodeMemoryPressure will be added when node has memory pressure
// and feature-gate for TaintNodesByCondition flag is enabled,
// and removed when node has enough memory.
TaintNodeMemoryPressure = "node.kubernetes.io/memory-pressure"
// TaintNodeDiskPressure will be added when node has disk pressure
// and feature-gate for TaintNodesByCondition flag is enabled,
// and removed when node has enough disk.
TaintNodeDiskPressure = "node.kubernetes.io/disk-pressure"
// TaintNodeNetworkUnavailable will be added when node's network is unavailable
// and feature-gate for TaintNodesByCondition flag is enabled,
// and removed when network becomes ready.
TaintNodeNetworkUnavailable = "node.kubernetes.io/network-unavailable"
// TaintNodePIDPressure will be added when node has pid pressure
// and feature-gate for TaintNodesByCondition flag is enabled,
// and removed when node has enough available pids.
TaintNodePIDPressure = "node.kubernetes.io/pid-pressure"
)

View File

@ -3689,6 +3689,11 @@ func (in *PodSecurityContext) DeepCopyInto(out *PodSecurityContext) {
*out = make([]Sysctl, len(*in))
copy(*out, *in)
}
if in.FSGroupChangePolicy != nil {
in, out := &in.FSGroupChangePolicy, &out.FSGroupChangePolicy
*out = new(PodFSGroupChangePolicy)
**out = **in
}
return
}

4
vendor/k8s.io/api/go.mod generated vendored
View File

@ -7,11 +7,11 @@ go 1.13
require (
github.com/gogo/protobuf v1.3.1
github.com/stretchr/testify v1.4.0
k8s.io/apimachinery v0.18.0-beta.1
k8s.io/apimachinery v0.18.0
)
replace (
golang.org/x/sys => golang.org/x/sys v0.0.0-20190813064441-fde4db37ae7a // pinned to release-branch.go1.13
golang.org/x/tools => golang.org/x/tools v0.0.0-20190821162956-65e3620a7ae7 // pinned to release-branch.go1.13
k8s.io/apimachinery => k8s.io/apimachinery v0.18.0-beta.1
k8s.io/apimachinery => k8s.io/apimachinery v0.18.0
)

2
vendor/k8s.io/apimachinery/go.mod generated vendored
View File

@ -33,7 +33,7 @@ require (
gopkg.in/yaml.v2 v2.2.8
k8s.io/klog v1.0.0
k8s.io/kube-openapi v0.0.0-20200121204235-bf4fb3bd569c
sigs.k8s.io/structured-merge-diff/v3 v3.0.0-20200207200219-5e70324e7c1c
sigs.k8s.io/structured-merge-diff/v3 v3.0.0
sigs.k8s.io/yaml v1.2.0
)

View File

@ -252,7 +252,9 @@ func ResetObjectMetaForStatus(meta, existingMeta Object) {
meta.SetAnnotations(existingMeta.GetAnnotations())
meta.SetFinalizers(existingMeta.GetFinalizers())
meta.SetOwnerReferences(existingMeta.GetOwnerReferences())
meta.SetManagedFields(existingMeta.GetManagedFields())
// managedFields must be preserved since it's been modified to
// track changed fields in the status update.
//meta.SetManagedFields(existingMeta.GetManagedFields())
}
// MarshalJSON implements json.Marshaler

View File

@ -28,9 +28,14 @@ type MessageCountMap map[string]int
// Aggregate represents an object that contains multiple errors, but does not
// necessarily have singular semantic meaning.
// The aggregate can be used with `errors.Is()` to check for the occurrence of
// a specific error type.
// Errors.As() is not supported, because the caller presumably cares about a
// specific error of potentially multiple that match the given type.
type Aggregate interface {
error
Errors() []error
Is(error) bool
}
// NewAggregate converts a slice of errors into an Aggregate interface, which
@ -71,16 +76,17 @@ func (agg aggregate) Error() string {
}
seenerrs := sets.NewString()
result := ""
agg.visit(func(err error) {
agg.visit(func(err error) bool {
msg := err.Error()
if seenerrs.Has(msg) {
return
return false
}
seenerrs.Insert(msg)
if len(seenerrs) > 1 {
result += ", "
}
result += msg
return false
})
if len(seenerrs) == 1 {
return result
@ -88,21 +94,35 @@ func (agg aggregate) Error() string {
return "[" + result + "]"
}
func (agg aggregate) visit(f func(err error)) {
func (agg aggregate) Is(target error) bool {
return agg.visit(func(err error) bool {
return errors.Is(err, target)
})
}
func (agg aggregate) visit(f func(err error) bool) bool {
for _, err := range agg {
switch err := err.(type) {
case aggregate:
err.visit(f)
if match := err.visit(f); match {
return match
}
case Aggregate:
for _, nestedErr := range err.Errors() {
f(nestedErr)
if match := f(nestedErr); match {
return match
}
}
default:
f(err)
if match := f(err); match {
return match
}
}
}
return false
}
// Errors is part of the Aggregate interface.
func (agg aggregate) Errors() []error {
return []error(agg)
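A minimal, editor-added sketch (not part of the vendored file) of what the new Is method enables, assuming the package above is imported as utilerrors:
package main

import (
	"errors"
	"fmt"
	"io"

	utilerrors "k8s.io/apimachinery/pkg/util/errors"
)

func main() {
	// Aggregate now implements Is, so errors.Is can find a sentinel error
	// anywhere inside a (possibly nested) aggregate.
	agg := utilerrors.NewAggregate([]error{fmt.Errorf("first failure"), io.EOF})
	fmt.Println(errors.Is(agg, io.EOF)) // true
}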

View File

@ -206,13 +206,17 @@ func GetHTTPClient(req *http.Request) string {
return "unknown"
}
// SourceIPs splits the comma separated X-Forwarded-For header or returns the X-Real-Ip header or req.RemoteAddr,
// in that order, ignoring invalid IPs. It returns nil if all of these are empty or invalid.
// SourceIPs splits the comma separated X-Forwarded-For header and joins it with
// the X-Real-Ip header and/or req.RemoteAddr, ignoring invalid IPs.
// The X-Real-Ip is omitted if it's already present in the X-Forwarded-For chain.
// The req.RemoteAddr is always the last IP in the returned list.
// It returns nil if all of these are empty or invalid.
func SourceIPs(req *http.Request) []net.IP {
var srcIPs []net.IP
hdr := req.Header
// First check the X-Forwarded-For header for requests via proxy.
hdrForwardedFor := hdr.Get("X-Forwarded-For")
forwardedForIPs := []net.IP{}
if hdrForwardedFor != "" {
// X-Forwarded-For can be a csv of IPs in case of multiple proxies.
// Use the first valid one.
@ -220,38 +224,49 @@ func SourceIPs(req *http.Request) []net.IP {
for _, part := range parts {
ip := net.ParseIP(strings.TrimSpace(part))
if ip != nil {
forwardedForIPs = append(forwardedForIPs, ip)
srcIPs = append(srcIPs, ip)
}
}
}
if len(forwardedForIPs) > 0 {
return forwardedForIPs
}
// Try the X-Real-Ip header.
hdrRealIp := hdr.Get("X-Real-Ip")
if hdrRealIp != "" {
ip := net.ParseIP(hdrRealIp)
if ip != nil {
return []net.IP{ip}
// Only append the X-Real-Ip if it's not already contained in the X-Forwarded-For chain.
if ip != nil && !containsIP(srcIPs, ip) {
srcIPs = append(srcIPs, ip)
}
}
// Fallback to Remote Address in request, which will give the correct client IP when there is no proxy.
// Always include the request Remote Address as it cannot be easily spoofed.
var remoteIP net.IP
// Remote Address in Go's HTTP server is in the form host:port so we need to split that first.
host, _, err := net.SplitHostPort(req.RemoteAddr)
if err == nil {
if remoteIP := net.ParseIP(host); remoteIP != nil {
return []net.IP{remoteIP}
remoteIP = net.ParseIP(host)
}
}
// Fallback if Remote Address was just IP.
if remoteIP := net.ParseIP(req.RemoteAddr); remoteIP != nil {
return []net.IP{remoteIP}
if remoteIP == nil {
remoteIP = net.ParseIP(req.RemoteAddr)
}
return nil
// Don't duplicate remote IP if it's already the last address in the chain.
if remoteIP != nil && (len(srcIPs) == 0 || !remoteIP.Equal(srcIPs[len(srcIPs)-1])) {
srcIPs = append(srcIPs, remoteIP)
}
return srcIPs
}
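An editor-added usage sketch (not in the diff) showing the new merging behaviour, assuming net/http is imported and this package is imported as utilnet:
	req, _ := http.NewRequest("GET", "http://example.invalid/", nil)
	req.Header.Set("X-Forwarded-For", "10.0.0.1, 10.0.0.2")
	req.Header.Set("X-Real-Ip", "10.0.0.3")
	req.RemoteAddr = "192.0.2.1:4711"
	// Previously only the X-Forwarded-For IPs would be returned; now the
	// result is 10.0.0.1, 10.0.0.2, 10.0.0.3, 192.0.2.1, in that order.
	ips := utilnet.SourceIPs(req)
	_ = ips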
// Checks whether the given IP address is contained in the list of IPs.
func containsIP(ips []net.IP, ip net.IP) bool {
for _, v := range ips {
if v.Equal(ip) {
return true
}
}
return false
}
// Extracts and returns the client's IP from the given request.

View File

@ -109,6 +109,44 @@ func IsFullyQualifiedDomainName(fldPath *field.Path, name string) field.ErrorLis
return allErrors
}
// Allowed characters in an HTTP Path as defined by RFC 3986. An HTTP path may
// contain:
// * unreserved characters (alphanumeric, '-', '.', '_', '~')
// * percent-encoded octets
// * sub-delims ("!", "$", "&", "'", "(", ")", "*", "+", ",", ";", "=")
// * a colon character (":")
const httpPathFmt string = `[A-Za-z0-9/\-._~%!$&'()*+,;=:]+`
var httpPathRegexp = regexp.MustCompile("^" + httpPathFmt + "$")
// IsDomainPrefixedPath checks if the given string is a domain-prefixed path
// (e.g. acme.io/foo). All characters before the first "/" must be a valid
// subdomain as defined by RFC 1123. All characters trailing the first "/" must
// be valid HTTP Path characters as defined by RFC 3986.
func IsDomainPrefixedPath(fldPath *field.Path, dpPath string) field.ErrorList {
var allErrs field.ErrorList
if len(dpPath) == 0 {
return append(allErrs, field.Required(fldPath, ""))
}
segments := strings.SplitN(dpPath, "/", 2)
if len(segments) != 2 || len(segments[0]) == 0 || len(segments[1]) == 0 {
return append(allErrs, field.Invalid(fldPath, dpPath, "must be a domain-prefixed path (such as \"acme.io/foo\")"))
}
host := segments[0]
for _, err := range IsDNS1123Subdomain(host) {
allErrs = append(allErrs, field.Invalid(fldPath, host, err))
}
path := segments[1]
if !httpPathRegexp.MatchString(path) {
return append(allErrs, field.Invalid(fldPath, path, RegexError("Invalid path", httpPathFmt)))
}
return allErrs
}
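Editor's sketch (not part of the change) of calling the new validator, assuming k8s.io/apimachinery/pkg/util/validation is imported as validation and .../util/validation/field as field:
	errs := validation.IsDomainPrefixedPath(field.NewPath("spec", "controller"), "acme.io/foo")
	if len(errs) == 0 {
		// "acme.io/foo" parses as <RFC 1123 subdomain>/<RFC 3986 path> and is accepted.
	}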
const labelValueFmt string = "(" + qualifiedNameFmt + ")?"
const labelValueErrMsg string = "a valid label must be an empty string or consist of alphanumeric characters, '-', '_' or '.', and must start and end with an alphanumeric character"

19
vendor/k8s.io/apimachinery/pkg/util/wait/doc.go generated vendored Normal file
View File

@ -0,0 +1,19 @@
/*
Copyright 2014 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Package wait provides tools for polling or listening for changes
// to a condition.
package wait // import "k8s.io/apimachinery/pkg/util/wait"

593
vendor/k8s.io/apimachinery/pkg/util/wait/wait.go generated vendored Normal file
View File

@ -0,0 +1,593 @@
/*
Copyright 2014 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package wait
import (
"context"
"errors"
"math"
"math/rand"
"sync"
"time"
"k8s.io/apimachinery/pkg/util/clock"
"k8s.io/apimachinery/pkg/util/runtime"
)
// For any test of the style:
// ...
// <- time.After(timeout):
// t.Errorf("Timed out")
// The value for timeout should effectively be "forever." Obviously we don't want our tests to truly lock up forever, but 30s
// is long enough that it is effectively forever for the things that can slow down a run on a heavily contended machine
// (GC, seeks, etc), but not so long as to make a developer ctrl-c a test run if they do happen to break that test.
var ForeverTestTimeout = time.Second * 30
// NeverStop may be passed to Until to make it never stop.
var NeverStop <-chan struct{} = make(chan struct{})
// Group allows starting a group of goroutines and waiting for their completion.
type Group struct {
wg sync.WaitGroup
}
func (g *Group) Wait() {
g.wg.Wait()
}
// StartWithChannel starts f in a new goroutine in the group.
// stopCh is passed to f as an argument. f should stop when stopCh is available.
func (g *Group) StartWithChannel(stopCh <-chan struct{}, f func(stopCh <-chan struct{})) {
g.Start(func() {
f(stopCh)
})
}
// StartWithContext starts f in a new goroutine in the group.
// ctx is passed to f as an argument. f should stop when ctx.Done() is available.
func (g *Group) StartWithContext(ctx context.Context, f func(context.Context)) {
g.Start(func() {
f(ctx)
})
}
// Start starts f in a new goroutine in the group.
func (g *Group) Start(f func()) {
g.wg.Add(1)
go func() {
defer g.wg.Done()
f()
}()
}
// Forever calls f every period forever.
//
// Forever is syntactic sugar on top of Until.
func Forever(f func(), period time.Duration) {
Until(f, period, NeverStop)
}
// Until loops until stop channel is closed, running f every period.
//
// Until is syntactic sugar on top of JitterUntil with zero jitter factor and
// with sliding = true (which means the timer for period starts after the f
// completes).
func Until(f func(), period time.Duration, stopCh <-chan struct{}) {
JitterUntil(f, period, 0.0, true, stopCh)
}
// UntilWithContext loops until context is done, running f every period.
//
// UntilWithContext is syntactic sugar on top of JitterUntilWithContext
// with zero jitter factor and with sliding = true (which means the timer
// for period starts after the f completes).
func UntilWithContext(ctx context.Context, f func(context.Context), period time.Duration) {
JitterUntilWithContext(ctx, f, period, 0.0, true)
}
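Editor-added usage sketch (not part of the new file): the most common pattern is Until with a stop channel. Assumes fmt, time, and this wait package are imported.
	stopCh := make(chan struct{})
	go wait.Until(func() {
		fmt.Println("sync loop iteration") // periodic work goes here
	}, 10*time.Second, stopCh)
	// ... later:
	close(stopCh) // stops the loop after the current iteration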
// NonSlidingUntil loops until stop channel is closed, running f every
// period.
//
// NonSlidingUntil is syntactic sugar on top of JitterUntil with zero jitter
// factor, with sliding = false (meaning the timer for period starts at the same
// time as the function starts).
func NonSlidingUntil(f func(), period time.Duration, stopCh <-chan struct{}) {
JitterUntil(f, period, 0.0, false, stopCh)
}
// NonSlidingUntilWithContext loops until context is done, running f every
// period.
//
// NonSlidingUntilWithContext is syntactic sugar on top of JitterUntilWithContext
// with zero jitter factor, with sliding = false (meaning the timer for period
// starts at the same time as the function starts).
func NonSlidingUntilWithContext(ctx context.Context, f func(context.Context), period time.Duration) {
JitterUntilWithContext(ctx, f, period, 0.0, false)
}
// JitterUntil loops until stop channel is closed, running f every period.
//
// If jitterFactor is positive, the period is jittered before every run of f.
// If jitterFactor is not positive, the period is unchanged and not jittered.
//
// If sliding is true, the period is computed after f runs. If it is false then
// period includes the runtime for f.
//
// Close stopCh to stop. f may not be invoked if stop channel is already
// closed. Pass NeverStop if you don't want it to stop.
func JitterUntil(f func(), period time.Duration, jitterFactor float64, sliding bool, stopCh <-chan struct{}) {
BackoffUntil(f, NewJitteredBackoffManager(period, jitterFactor, &clock.RealClock{}), sliding, stopCh)
}
// BackoffUntil loops until stop channel is closed, running f every duration given by BackoffManager.
//
// If sliding is true, the period is computed after f runs. If it is false then
// period includes the runtime for f.
func BackoffUntil(f func(), backoff BackoffManager, sliding bool, stopCh <-chan struct{}) {
var t clock.Timer
for {
select {
case <-stopCh:
return
default:
}
if !sliding {
t = backoff.Backoff()
}
func() {
defer runtime.HandleCrash()
f()
}()
if sliding {
t = backoff.Backoff()
}
// NOTE: b/c there is no priority selection in golang
// it is possible for this to race, meaning we could
// trigger t.C and stopCh, and t.C select falls through.
// In order to mitigate we re-check stopCh at the beginning
// of every loop to prevent extra executions of f().
select {
case <-stopCh:
return
case <-t.C():
}
}
}
// JitterUntilWithContext loops until context is done, running f every period.
//
// If jitterFactor is positive, the period is jittered before every run of f.
// If jitterFactor is not positive, the period is unchanged and not jittered.
//
// If sliding is true, the period is computed after f runs. If it is false then
// period includes the runtime for f.
//
// Cancel context to stop. f may not be invoked if context is already expired.
func JitterUntilWithContext(ctx context.Context, f func(context.Context), period time.Duration, jitterFactor float64, sliding bool) {
JitterUntil(func() { f(ctx) }, period, jitterFactor, sliding, ctx.Done())
}
// Jitter returns a time.Duration between duration and duration + maxFactor *
// duration.
//
// This allows clients to avoid converging on periodic behavior. If maxFactor
// is 0.0, a suggested default value will be chosen.
func Jitter(duration time.Duration, maxFactor float64) time.Duration {
if maxFactor <= 0.0 {
maxFactor = 1.0
}
wait := duration + time.Duration(rand.Float64()*maxFactor*float64(duration))
return wait
}
// ErrWaitTimeout is returned when the condition exited without success.
var ErrWaitTimeout = errors.New("timed out waiting for the condition")
// ConditionFunc returns true if the condition is satisfied, or an error
// if the loop should be aborted.
type ConditionFunc func() (done bool, err error)
// runConditionWithCrashProtection runs a ConditionFunc with crash protection
func runConditionWithCrashProtection(condition ConditionFunc) (bool, error) {
defer runtime.HandleCrash()
return condition()
}
// Backoff holds parameters applied to a Backoff function.
type Backoff struct {
// The initial duration.
Duration time.Duration
// Duration is multiplied by factor each iteration, if factor is not zero
// and the limits imposed by Steps and Cap have not been reached.
// Should not be negative.
// The jitter does not contribute to the updates to the duration parameter.
Factor float64
// The sleep at each iteration is the duration plus an additional
// amount chosen uniformly at random from the interval between
// zero and `jitter*duration`.
Jitter float64
// The remaining number of iterations in which the duration
// parameter may change (but progress can be stopped earlier by
// hitting the cap). If not positive, the duration is not
// changed. Used for exponential backoff in combination with
// Factor and Cap.
Steps int
// A limit on revised values of the duration parameter. If a
// multiplication by the factor parameter would make the duration
// exceed the cap then the duration is set to the cap and the
// steps parameter is set to zero.
Cap time.Duration
}
// Step (1) returns an amount of time to sleep determined by the
// original Duration and Jitter and (2) mutates the provided Backoff
// to update its Steps and Duration.
func (b *Backoff) Step() time.Duration {
if b.Steps < 1 {
if b.Jitter > 0 {
return Jitter(b.Duration, b.Jitter)
}
return b.Duration
}
b.Steps--
duration := b.Duration
// calculate the next step
if b.Factor != 0 {
b.Duration = time.Duration(float64(b.Duration) * b.Factor)
if b.Cap > 0 && b.Duration > b.Cap {
b.Duration = b.Cap
b.Steps = 0
}
}
if b.Jitter > 0 {
duration = Jitter(duration, b.Jitter)
}
return duration
}
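Illustration added by the editor (not in the file): stepping a Backoff yields a growing, jittered delay until Steps are exhausted. Assumes fmt and time are imported.
	b := wait.Backoff{Duration: 100 * time.Millisecond, Factor: 2.0, Jitter: 0.1, Steps: 4, Cap: time.Second}
	for i := 0; i < 4; i++ {
		// Roughly 100ms, 200ms, 400ms, 800ms, each plus up to 10% jitter.
		fmt.Println(b.Step())
	}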
// contextForChannel derives a child context from a parent channel.
//
// The derived context's Done channel is closed when the returned cancel function
// is called or when the parent channel is closed, whichever happens first.
//
// Note the caller must *always* call the CancelFunc, otherwise resources may be leaked.
func contextForChannel(parentCh <-chan struct{}) (context.Context, context.CancelFunc) {
ctx, cancel := context.WithCancel(context.Background())
go func() {
select {
case <-parentCh:
cancel()
case <-ctx.Done():
}
}()
return ctx, cancel
}
// BackoffManager manages backoff with a particular scheme based on its underlying implementation. It provides
// an interface to return a timer for backoff, and caller shall backoff until Timer.C returns. If the second Backoff()
// is called before the timer from the first Backoff() call finishes, the first timer will NOT be drained.
// The BackoffManager is supposed to be called in a single-threaded environment.
type BackoffManager interface {
Backoff() clock.Timer
}
type exponentialBackoffManagerImpl struct {
backoff *Backoff
backoffTimer clock.Timer
lastBackoffStart time.Time
initialBackoff time.Duration
backoffResetDuration time.Duration
clock clock.Clock
}
// NewExponentialBackoffManager returns a manager for managing exponential backoff. Each backoff is jittered and
// backoff will not exceed the given max. If the backoff is not called within resetDuration, the backoff is reset.
// This backoff manager is used to reduce load during upstream unhealthiness.
func NewExponentialBackoffManager(initBackoff, maxBackoff, resetDuration time.Duration, backoffFactor, jitter float64, c clock.Clock) BackoffManager {
return &exponentialBackoffManagerImpl{
backoff: &Backoff{
Duration: initBackoff,
Factor: backoffFactor,
Jitter: jitter,
// the current impl of wait.Backoff returns Backoff.Duration once steps are used up, which is not
// what we ideally need here; we set it to max int and assume we will never use up the steps
Steps: math.MaxInt32,
Cap: maxBackoff,
},
backoffTimer: c.NewTimer(0),
initialBackoff: initBackoff,
lastBackoffStart: c.Now(),
backoffResetDuration: resetDuration,
clock: c,
}
}
func (b *exponentialBackoffManagerImpl) getNextBackoff() time.Duration {
if b.clock.Now().Sub(b.lastBackoffStart) > b.backoffResetDuration {
b.backoff.Steps = math.MaxInt32
b.backoff.Duration = b.initialBackoff
}
b.lastBackoffStart = b.clock.Now()
return b.backoff.Step()
}
// Backoff implements BackoffManager.Backoff, it returns a timer so caller can block on the timer for backoff.
func (b *exponentialBackoffManagerImpl) Backoff() clock.Timer {
b.backoffTimer.Reset(b.getNextBackoff())
return b.backoffTimer
}
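Editor's sketch (not in the file) of pairing the exponential manager with BackoffUntil; the reset interval means a quiet minute returns the delay to its initial value. Assumes time and the clock package imported above are in scope.
	mgr := wait.NewExponentialBackoffManager(
		time.Second,    // initial backoff
		30*time.Second, // cap
		time.Minute,    // reset to initial if no backoff happened for this long
		2.0,            // factor
		0.1,            // jitter
		&clock.RealClock{},
	)
	stopCh := make(chan struct{})
	wait.BackoffUntil(func() { /* call a possibly unhealthy upstream */ }, mgr, true, stopCh)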
type jitteredBackoffManagerImpl struct {
clock clock.Clock
duration time.Duration
jitter float64
backoffTimer clock.Timer
}
// NewJitteredBackoffManager returns a BackoffManager that backs off with the given duration plus the given jitter. If the jitter
// is negative, backoff will not be jittered.
func NewJitteredBackoffManager(duration time.Duration, jitter float64, c clock.Clock) BackoffManager {
return &jitteredBackoffManagerImpl{
clock: c,
duration: duration,
jitter: jitter,
backoffTimer: c.NewTimer(0),
}
}
func (j *jitteredBackoffManagerImpl) getNextBackoff() time.Duration {
jitteredPeriod := j.duration
if j.jitter > 0.0 {
jitteredPeriod = Jitter(j.duration, j.jitter)
}
return jitteredPeriod
}
func (j *jitteredBackoffManagerImpl) Backoff() clock.Timer {
j.backoffTimer.Reset(j.getNextBackoff())
return j.backoffTimer
}
// ExponentialBackoff repeats a condition check with exponential backoff.
//
// It repeatedly checks the condition and then sleeps, using `backoff.Step()`
// to determine the length of the sleep and adjust Duration and Steps.
// Stops and returns as soon as:
// 1. the condition check returns true or an error,
// 2. `backoff.Steps` checks of the condition have been done, or
// 3. a sleep truncated by the cap on duration has been completed.
// In case (1) the returned error is what the condition function returned.
// In all other cases, ErrWaitTimeout is returned.
func ExponentialBackoff(backoff Backoff, condition ConditionFunc) error {
for backoff.Steps > 0 {
if ok, err := runConditionWithCrashProtection(condition); err != nil || ok {
return err
}
if backoff.Steps == 1 {
break
}
time.Sleep(backoff.Step())
}
return ErrWaitTimeout
}
// Poll tries a condition func until it returns true, an error, or the timeout
// is reached.
//
// Poll always waits the interval before the run of 'condition'.
// 'condition' will always be invoked at least once.
//
// Some intervals may be missed if the condition takes too long or the time
// window is too short.
//
// If you want to Poll something forever, see PollInfinite.
func Poll(interval, timeout time.Duration, condition ConditionFunc) error {
return pollInternal(poller(interval, timeout), condition)
}
func pollInternal(wait WaitFunc, condition ConditionFunc) error {
done := make(chan struct{})
defer close(done)
return WaitFor(wait, condition, done)
}
// PollImmediate tries a condition func until it returns true, an error, or the timeout
// is reached.
//
// PollImmediate always checks 'condition' before waiting for the interval. 'condition'
// will always be invoked at least once.
//
// Some intervals may be missed if the condition takes too long or the time
// window is too short.
//
// If you want to immediately Poll something forever, see PollImmediateInfinite.
func PollImmediate(interval, timeout time.Duration, condition ConditionFunc) error {
return pollImmediateInternal(poller(interval, timeout), condition)
}
func pollImmediateInternal(wait WaitFunc, condition ConditionFunc) error {
done, err := runConditionWithCrashProtection(condition)
if err != nil {
return err
}
if done {
return nil
}
return pollInternal(wait, condition)
}
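Editor-added sketch (not in the file): a typical PollImmediate call; isReady is a hypothetical helper standing in for the real condition.
	err := wait.PollImmediate(2*time.Second, 30*time.Second, func() (bool, error) {
		return isReady(), nil // hypothetical readiness check
	})
	if err == wait.ErrWaitTimeout {
		// the condition never became true within 30 seconds
	}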
// PollInfinite tries a condition func until it returns true or an error
//
// PollInfinite always waits the interval before the run of 'condition'.
//
// Some intervals may be missed if the condition takes too long or the time
// window is too short.
func PollInfinite(interval time.Duration, condition ConditionFunc) error {
done := make(chan struct{})
defer close(done)
return PollUntil(interval, condition, done)
}
// PollImmediateInfinite tries a condition func until it returns true or an error
//
// PollImmediateInfinite runs the 'condition' before waiting for the interval.
//
// Some intervals may be missed if the condition takes too long or the time
// window is too short.
func PollImmediateInfinite(interval time.Duration, condition ConditionFunc) error {
done, err := runConditionWithCrashProtection(condition)
if err != nil {
return err
}
if done {
return nil
}
return PollInfinite(interval, condition)
}
// PollUntil tries a condition func until it returns true, an error or stopCh is
// closed.
//
// PollUntil always waits interval before the first run of 'condition'.
// 'condition' will always be invoked at least once.
func PollUntil(interval time.Duration, condition ConditionFunc, stopCh <-chan struct{}) error {
ctx, cancel := contextForChannel(stopCh)
defer cancel()
return WaitFor(poller(interval, 0), condition, ctx.Done())
}
// PollImmediateUntil tries a condition func until it returns true, an error or stopCh is closed.
//
// PollImmediateUntil runs the 'condition' before waiting for the interval.
// 'condition' will always be invoked at least once.
func PollImmediateUntil(interval time.Duration, condition ConditionFunc, stopCh <-chan struct{}) error {
done, err := condition()
if err != nil {
return err
}
if done {
return nil
}
select {
case <-stopCh:
return ErrWaitTimeout
default:
return PollUntil(interval, condition, stopCh)
}
}
// WaitFunc creates a channel that receives an item every time a test
// should be executed and is closed when the last test should be invoked.
type WaitFunc func(done <-chan struct{}) <-chan struct{}
// WaitFor continually checks 'fn' as driven by 'wait'.
//
// WaitFor gets a channel from 'wait()', and then invokes 'fn' once for every value
// placed on the channel and once more when the channel is closed. If the channel is closed
// and 'fn' returns false without error, WaitFor returns ErrWaitTimeout.
//
// If 'fn' returns an error the loop ends and that error is returned. If
// 'fn' returns true the loop ends and nil is returned.
//
// ErrWaitTimeout will be returned if the 'done' channel is closed without fn ever
// returning true.
//
// When the done channel is closed, because the golang `select` statement is
// "uniform pseudo-random", the `fn` might still run one or multiple time,
// though eventually `WaitFor` will return.
func WaitFor(wait WaitFunc, fn ConditionFunc, done <-chan struct{}) error {
stopCh := make(chan struct{})
defer close(stopCh)
c := wait(stopCh)
for {
select {
case _, open := <-c:
ok, err := runConditionWithCrashProtection(fn)
if err != nil {
return err
}
if ok {
return nil
}
if !open {
return ErrWaitTimeout
}
case <-done:
return ErrWaitTimeout
}
}
}
// poller returns a WaitFunc that will send to the channel every interval until
// timeout has elapsed and then closes the channel.
//
// Over very short intervals you may receive no ticks before the channel is
// closed. A timeout of 0 is interpreted as an infinity, and in such a case
// it would be the caller's responsibility to close the done channel.
// Failure to do so would result in a leaked goroutine.
//
// Output ticks are not buffered. If the channel is not ready to receive an
// item, the tick is skipped.
func poller(interval, timeout time.Duration) WaitFunc {
return WaitFunc(func(done <-chan struct{}) <-chan struct{} {
ch := make(chan struct{})
go func() {
defer close(ch)
tick := time.NewTicker(interval)
defer tick.Stop()
var after <-chan time.Time
if timeout != 0 {
// time.After is more convenient, but it
// potentially leaves timers around much longer
// than necessary if we exit early.
timer := time.NewTimer(timeout)
after = timer.C
defer timer.Stop()
}
for {
select {
case <-tick.C:
// If the consumer isn't ready for this signal drop it and
// check the other channels.
select {
case ch <- struct{}{}:
default:
}
case <-after:
return
case <-done:
return
}
}
}()
return ch
})
}

23
vendor/k8s.io/apiserver/go.mod generated vendored
View File

@ -29,7 +29,6 @@ require (
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822
github.com/pkg/errors v0.8.1
github.com/pquerna/cachecontrol v0.0.0-20171018203845-0dec1b30a021 // indirect
github.com/prometheus/client_golang v1.0.0
github.com/prometheus/client_model v0.2.0
github.com/sirupsen/logrus v1.4.2 // indirect
github.com/spf13/pflag v1.0.5
@ -44,23 +43,23 @@ require (
gopkg.in/square/go-jose.v2 v2.2.2
gopkg.in/yaml.v2 v2.2.8
gotest.tools v2.2.0+incompatible // indirect
k8s.io/api v0.18.0-beta.1
k8s.io/apimachinery v0.18.0-beta.1
k8s.io/client-go v0.18.0-beta.1
k8s.io/component-base v0.18.0-beta.1
k8s.io/api v0.18.0
k8s.io/apimachinery v0.18.0
k8s.io/client-go v0.18.0
k8s.io/component-base v0.18.0
k8s.io/klog v1.0.0
k8s.io/kube-openapi v0.0.0-20200121204235-bf4fb3bd569c
k8s.io/utils v0.0.0-20200117235808-5f6fbceb4c31
sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.5
sigs.k8s.io/structured-merge-diff/v3 v3.0.0-20200207200219-5e70324e7c1c
k8s.io/utils v0.0.0-20200324210504-a9aa75ae1b89
sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.7
sigs.k8s.io/structured-merge-diff/v3 v3.0.0
sigs.k8s.io/yaml v1.2.0
)
replace (
golang.org/x/sys => golang.org/x/sys v0.0.0-20190813064441-fde4db37ae7a // pinned to release-branch.go1.13
golang.org/x/tools => golang.org/x/tools v0.0.0-20190821162956-65e3620a7ae7 // pinned to release-branch.go1.13
k8s.io/api => k8s.io/api v0.18.0-beta.1
k8s.io/apimachinery => k8s.io/apimachinery v0.18.0-beta.1
k8s.io/client-go => k8s.io/client-go v0.18.0-beta.1
k8s.io/component-base => k8s.io/component-base v0.18.0-beta.1
k8s.io/api => k8s.io/api v0.18.0
k8s.io/apimachinery => k8s.io/apimachinery v0.18.0
k8s.io/client-go => k8s.io/client-go v0.18.0
k8s.io/component-base => k8s.io/component-base v0.18.0
)

10
vendor/k8s.io/client-go/go.mod generated vendored
View File

@ -28,16 +28,16 @@ require (
golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45
golang.org/x/time v0.0.0-20190308202827-9d24e82272b4
google.golang.org/appengine v1.5.0 // indirect
k8s.io/api v0.18.0-beta.1
k8s.io/apimachinery v0.18.0-beta.1
k8s.io/api v0.18.0
k8s.io/apimachinery v0.18.0
k8s.io/klog v1.0.0
k8s.io/utils v0.0.0-20200117235808-5f6fbceb4c31
k8s.io/utils v0.0.0-20200324210504-a9aa75ae1b89
sigs.k8s.io/yaml v1.2.0
)
replace (
golang.org/x/sys => golang.org/x/sys v0.0.0-20190813064441-fde4db37ae7a // pinned to release-branch.go1.13
golang.org/x/tools => golang.org/x/tools v0.0.0-20190821162956-65e3620a7ae7 // pinned to release-branch.go1.13
k8s.io/api => k8s.io/api v0.18.0-beta.1
k8s.io/apimachinery => k8s.io/apimachinery v0.18.0-beta.1
k8s.io/api => k8s.io/api v0.18.0
k8s.io/apimachinery => k8s.io/apimachinery v0.18.0
)

View File

@ -39,6 +39,7 @@ import (
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/apimachinery/pkg/runtime/serializer/streaming"
utilclock "k8s.io/apimachinery/pkg/util/clock"
"k8s.io/apimachinery/pkg/util/net"
"k8s.io/apimachinery/pkg/watch"
restclientwatch "k8s.io/client-go/rest/watch"
@ -556,36 +557,68 @@ func (r *Request) tryThrottle(ctx context.Context) error {
klog.V(3).Infof("Throttling request took %v, request: %s:%s", latency, r.verb, r.URL().String())
}
if latency > extraLongThrottleLatency {
globalThrottledLogger.Log(2, fmt.Sprintf("Throttling request took %v, request: %s:%s", latency, r.verb, r.URL().String()))
// If the rate limiter latency is very high, the log message should be printed at a higher log level,
// but we use a throttled logger to prevent spamming.
globalThrottledLogger.Infof("Throttling request took %v, request: %s:%s", latency, r.verb, r.URL().String())
}
metrics.RateLimiterLatency.Observe(r.verb, r.finalURLTemplate(), latency)
return err
}
type throttledLogger struct {
logTimeLock sync.RWMutex
lastLogTime time.Time
type throttleSettings struct {
logLevel klog.Level
minLogInterval time.Duration
lastLogTime time.Time
lock sync.RWMutex
}
type throttledLogger struct {
clock utilclock.PassiveClock
settings []*throttleSettings
}
var globalThrottledLogger = &throttledLogger{
clock: utilclock.RealClock{},
settings: []*throttleSettings{
{
logLevel: 2,
minLogInterval: 1 * time.Second,
}, {
logLevel: 0,
minLogInterval: 10 * time.Second,
},
},
}
func (b *throttledLogger) Log(level klog.Level, message string) {
if bool(klog.V(level)) {
func (b *throttledLogger) attemptToLog() (klog.Level, bool) {
for _, setting := range b.settings {
if bool(klog.V(setting.logLevel)) {
// Return early without write locking if possible.
if func() bool {
b.logTimeLock.RLock()
defer b.logTimeLock.RUnlock()
return time.Since(b.lastLogTime) > b.minLogInterval
setting.lock.RLock()
defer setting.lock.RUnlock()
return b.clock.Since(setting.lastLogTime) >= setting.minLogInterval
}() {
b.logTimeLock.Lock()
defer b.logTimeLock.Unlock()
if time.Since(b.lastLogTime) > b.minLogInterval {
klog.V(level).Info(message)
b.lastLogTime = time.Now()
setting.lock.Lock()
defer setting.lock.Unlock()
if b.clock.Since(setting.lastLogTime) >= setting.minLogInterval {
setting.lastLogTime = b.clock.Now()
return setting.logLevel, true
}
}
return -1, false
}
}
return -1, false
}
// Infof will write a log message at each logLevel specified by the receiver's throttleSettings
// as long as it hasn't written a log message more recently than minLogInterval.
func (b *throttledLogger) Infof(message string, args ...interface{}) {
if logLevel, ok := b.attemptToLog(); ok {
klog.V(logLevel).Infof(message, args...)
}
}
@ -835,7 +868,7 @@ func (r *Request) request(ctx context.Context, fn func(*http.Request, *http.Resp
return err
}
// For connection errors and apiserver shutdown errors retry.
if net.IsConnectionReset(err) {
if net.IsConnectionReset(err) || net.IsProbableEOF(err) {
// For the purpose of retry, we set the artificial "retry-after" response.
// TODO: Should we clean the original response if it exists?
resp = &http.Response{

View File

@ -70,6 +70,9 @@ type Cluster struct {
LocationOfOrigin string
// Server is the address of the kubernetes cluster (https://hostname:port).
Server string `json:"server"`
// TLSServerName is used to check server certificate. If TLSServerName is empty, the hostname used to contact the server is used.
// +optional
TLSServerName string `json:"tls-server-name,omitempty"`
// InsecureSkipTLSVerify skips the validity check for the server's certificate. This will make your HTTPS connections insecure.
// +optional
InsecureSkipTLSVerify bool `json:"insecure-skip-tls-verify,omitempty"`

View File

@ -53,6 +53,8 @@ var (
ClientCertRotationAge DurationMetric = noopDuration{}
// RequestLatency is the latency metric that rest clients will update.
RequestLatency LatencyMetric = noopLatency{}
// RateLimiterLatency is the client side rate limiter latency metric.
RateLimiterLatency LatencyMetric = noopLatency{}
// RequestResult is the result metric that rest clients will update.
RequestResult ResultMetric = noopResult{}
)
@ -62,6 +64,7 @@ type RegisterOpts struct {
ClientCertExpiry ExpiryMetric
ClientCertRotationAge DurationMetric
RequestLatency LatencyMetric
RateLimiterLatency LatencyMetric
RequestResult ResultMetric
}
@ -78,6 +81,9 @@ func Register(opts RegisterOpts) {
if opts.RequestLatency != nil {
RequestLatency = opts.RequestLatency
}
if opts.RateLimiterLatency != nil {
RateLimiterLatency = opts.RateLimiterLatency
}
if opts.RequestResult != nil {
RequestResult = opts.RequestResult
}

View File

@ -25,6 +25,7 @@ import (
"time"
utilnet "k8s.io/apimachinery/pkg/util/net"
"k8s.io/apimachinery/pkg/util/wait"
)
// TlsTransportCache caches TLS http.RoundTrippers different configurations. The
@ -44,6 +45,8 @@ type tlsCacheKey struct {
caData string
certData string
keyData string
certFile string
keyFile string
getCert string
serverName string
nextProtos string
@ -91,6 +94,16 @@ func (c *tlsTransportCache) get(config *Config) (http.RoundTripper, error) {
KeepAlive: 30 * time.Second,
}).DialContext
}
// If we are reloading files, we need to handle certificate rotation properly
// TODO(jackkleeman): We can also add rotation here when config.HasCertCallback() is true
if config.TLS.ReloadTLSFiles {
dynamicCertDialer := certRotatingDialer(tlsConfig.GetClientCertificate, dial)
tlsConfig.GetClientCertificate = dynamicCertDialer.GetClientCertificate
dial = dynamicCertDialer.connDialer.DialContext
go dynamicCertDialer.Run(wait.NeverStop)
}
// Cache a single transport for these options
c.transports[key] = utilnet.SetTransportDefaults(&http.Transport{
Proxy: http.ProxyFromEnvironment,
@ -109,15 +122,23 @@ func tlsConfigKey(c *Config) (tlsCacheKey, error) {
if err := loadTLSFiles(c); err != nil {
return tlsCacheKey{}, err
}
return tlsCacheKey{
k := tlsCacheKey{
insecure: c.TLS.Insecure,
caData: string(c.TLS.CAData),
certData: string(c.TLS.CertData),
keyData: string(c.TLS.KeyData),
getCert: fmt.Sprintf("%p", c.TLS.GetCert),
serverName: c.TLS.ServerName,
nextProtos: strings.Join(c.TLS.NextProtos, ","),
dial: fmt.Sprintf("%p", c.Dial),
disableCompression: c.DisableCompression,
}, nil
}
if c.TLS.ReloadTLSFiles {
k.certFile = c.TLS.CertFile
k.keyFile = c.TLS.KeyFile
} else {
k.certData = string(c.TLS.CertData)
k.keyData = string(c.TLS.KeyData)
}
return k, nil
}

176
vendor/k8s.io/client-go/transport/cert_rotation.go generated vendored Normal file
View File

@ -0,0 +1,176 @@
/*
Copyright 2020 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package transport
import (
"bytes"
"crypto/tls"
"fmt"
"reflect"
"sync"
"time"
utilnet "k8s.io/apimachinery/pkg/util/net"
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/client-go/util/connrotation"
"k8s.io/client-go/util/workqueue"
"k8s.io/klog"
)
const workItemKey = "key"
// CertCallbackRefreshDuration is exposed so that integration tests can crank up the reload speed.
var CertCallbackRefreshDuration = 5 * time.Minute
type reloadFunc func(*tls.CertificateRequestInfo) (*tls.Certificate, error)
type dynamicClientCert struct {
clientCert *tls.Certificate
certMtx sync.RWMutex
reload reloadFunc
connDialer *connrotation.Dialer
// queue only ever has one item, but it has nice error handling backoff/retry semantics
queue workqueue.RateLimitingInterface
}
func certRotatingDialer(reload reloadFunc, dial utilnet.DialFunc) *dynamicClientCert {
d := &dynamicClientCert{
reload: reload,
connDialer: connrotation.NewDialer(connrotation.DialFunc(dial)),
queue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "DynamicClientCertificate"),
}
return d
}
// loadClientCert calls the callback and rotates connections if needed
func (c *dynamicClientCert) loadClientCert() (*tls.Certificate, error) {
cert, err := c.reload(nil)
if err != nil {
return nil, err
}
// check to see if we have a change. If the values are the same, do nothing.
c.certMtx.RLock()
haveCert := c.clientCert != nil
if certsEqual(c.clientCert, cert) {
c.certMtx.RUnlock()
return c.clientCert, nil
}
c.certMtx.RUnlock()
c.certMtx.Lock()
c.clientCert = cert
c.certMtx.Unlock()
// The first certificate requested is not a rotation that is worth closing connections for
if !haveCert {
return cert, nil
}
klog.V(1).Infof("certificate rotation detected, shutting down client connections to start using new credentials")
c.connDialer.CloseAll()
return cert, nil
}
// certsEqual compares tls Certificates, ignoring the Leaf which may get filled in dynamically
func certsEqual(left, right *tls.Certificate) bool {
if left == nil || right == nil {
return left == right
}
if !byteMatrixEqual(left.Certificate, right.Certificate) {
return false
}
if !reflect.DeepEqual(left.PrivateKey, right.PrivateKey) {
return false
}
if !byteMatrixEqual(left.SignedCertificateTimestamps, right.SignedCertificateTimestamps) {
return false
}
if !bytes.Equal(left.OCSPStaple, right.OCSPStaple) {
return false
}
return true
}
func byteMatrixEqual(left, right [][]byte) bool {
if len(left) != len(right) {
return false
}
for i := range left {
if !bytes.Equal(left[i], right[i]) {
return false
}
}
return true
}
// run starts the controller and blocks until stopCh is closed.
func (c *dynamicClientCert) Run(stopCh <-chan struct{}) {
defer utilruntime.HandleCrash()
defer c.queue.ShutDown()
klog.V(3).Infof("Starting client certificate rotation controller")
defer klog.V(3).Infof("Shutting down client certificate rotation controller")
go wait.Until(c.runWorker, time.Second, stopCh)
go wait.PollImmediateUntil(CertCallbackRefreshDuration, func() (bool, error) {
c.queue.Add(workItemKey)
return false, nil
}, stopCh)
<-stopCh
}
func (c *dynamicClientCert) runWorker() {
for c.processNextWorkItem() {
}
}
func (c *dynamicClientCert) processNextWorkItem() bool {
dsKey, quit := c.queue.Get()
if quit {
return false
}
defer c.queue.Done(dsKey)
_, err := c.loadClientCert()
if err == nil {
c.queue.Forget(dsKey)
return true
}
utilruntime.HandleError(fmt.Errorf("%v failed with : %v", dsKey, err))
c.queue.AddRateLimited(dsKey)
return true
}
func (c *dynamicClientCert) GetClientCertificate(*tls.CertificateRequestInfo) (*tls.Certificate, error) {
return c.loadClientCert()
}

View File

@ -118,6 +118,7 @@ type TLSConfig struct {
CAFile string // Path of the PEM-encoded server trusted root certificates.
CertFile string // Path of the PEM-encoded client certificate.
KeyFile string // Path of the PEM-encoded client key.
ReloadTLSFiles bool // Set to indicate that the original config provided files, and that they should be reloaded
Insecure bool // Server should be accessed without verifying the certificate. For testing only.
ServerName string // Override for the server name passed to the server for SNI and used to verify certificates.

View File

@ -23,6 +23,8 @@ import (
"fmt"
"io/ioutil"
"net/http"
"sync"
"time"
utilnet "k8s.io/apimachinery/pkg/util/net"
"k8s.io/klog"
@ -81,7 +83,8 @@ func TLSConfigFor(c *Config) (*tls.Config, error) {
}
var staticCert *tls.Certificate
if c.HasCertAuth() {
// Treat cert as static if either key or cert was data, not a file
if c.HasCertAuth() && !c.TLS.ReloadTLSFiles {
// If key/cert were provided, verify them before setting up
// tlsConfig.GetClientCertificate.
cert, err := tls.X509KeyPair(c.TLS.CertData, c.TLS.KeyData)
@ -91,6 +94,11 @@ func TLSConfigFor(c *Config) (*tls.Config, error) {
staticCert = &cert
}
var dynamicCertLoader func() (*tls.Certificate, error)
if c.TLS.ReloadTLSFiles {
dynamicCertLoader = cachingCertificateLoader(c.TLS.CertFile, c.TLS.KeyFile)
}
if c.HasCertAuth() || c.HasCertCallback() {
tlsConfig.GetClientCertificate = func(*tls.CertificateRequestInfo) (*tls.Certificate, error) {
// Note: static key/cert data always take precedence over cert
@ -98,6 +106,10 @@ func TLSConfigFor(c *Config) (*tls.Config, error) {
if staticCert != nil {
return staticCert, nil
}
// key/cert files lead to ReloadTLSFiles being set - takes precedence over cert callback
if dynamicCertLoader != nil {
return dynamicCertLoader()
}
if c.HasCertCallback() {
cert, err := c.TLS.GetCert()
if err != nil {
@ -129,6 +141,11 @@ func loadTLSFiles(c *Config) error {
return err
}
// Check that we are purely loading from files
if len(c.TLS.CertFile) > 0 && len(c.TLS.CertData) == 0 && len(c.TLS.KeyFile) > 0 && len(c.TLS.KeyData) == 0 {
c.TLS.ReloadTLSFiles = true
}
c.TLS.CertData, err = dataFromSliceOrFile(c.TLS.CertData, c.TLS.CertFile)
if err != nil {
return err
@ -243,3 +260,44 @@ func tryCancelRequest(rt http.RoundTripper, req *http.Request) {
klog.Warningf("Unable to cancel request for %T", rt)
}
}
type certificateCacheEntry struct {
cert *tls.Certificate
err error
birth time.Time
}
// isStale returns true when this cache entry is too old to be usable
func (c *certificateCacheEntry) isStale() bool {
return time.Now().Sub(c.birth) > time.Second
}
func newCertificateCacheEntry(certFile, keyFile string) certificateCacheEntry {
cert, err := tls.LoadX509KeyPair(certFile, keyFile)
return certificateCacheEntry{cert: &cert, err: err, birth: time.Now()}
}
// cachingCertificateLoader ensures that we don't hammer the filesystem when opening many connections;
// the underlying cert files are read at most once every second.
func cachingCertificateLoader(certFile, keyFile string) func() (*tls.Certificate, error) {
current := newCertificateCacheEntry(certFile, keyFile)
var currentMtx sync.RWMutex
return func() (*tls.Certificate, error) {
currentMtx.RLock()
if current.isStale() {
currentMtx.RUnlock()
currentMtx.Lock()
defer currentMtx.Unlock()
if current.isStale() {
current = newCertificateCacheEntry(certFile, keyFile)
}
} else {
defer currentMtx.RUnlock()
}
return current.cert, current.err
}
}
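Editor's sketch (not part of the diff): file-based client certificates now flow through ReloadTLSFiles, so rotation is picked up without rebuilding the client. The paths are hypothetical; assumes k8s.io/client-go/transport is imported as transport.
	cfg := &transport.Config{
		TLS: transport.TLSConfig{
			CAFile:   "/etc/kubernetes/pki/ca.crt",
			CertFile: "/var/run/certs/client.crt", // file-based, so re-read at most once per second
			KeyFile:  "/var/run/certs/client.key",
		},
	}
	rt, err := transport.New(cfg) // RoundTripper that follows certificate rotation
	_, _ = rt, err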

View File

@ -0,0 +1,259 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package workqueue
import (
"math"
"sync"
"time"
"golang.org/x/time/rate"
)
type RateLimiter interface {
// When gets an item and gets to decide how long that item should wait
When(item interface{}) time.Duration
// Forget indicates that an item is finished being retried. Doesn't matter whether it's for perm failing
// or for success, we'll stop tracking it
Forget(item interface{})
// NumRequeues returns how many failures the item has had
NumRequeues(item interface{}) int
}
// DefaultControllerRateLimiter is a no-arg constructor for a default rate limiter for a workqueue. It has
// both overall and per-item rate limiting. The overall is a token bucket and the per-item is exponential
func DefaultControllerRateLimiter() RateLimiter {
return NewMaxOfRateLimiter(
NewItemExponentialFailureRateLimiter(5*time.Millisecond, 1000*time.Second),
// 10 qps, 100 bucket size. This is only for retry speed and it's only the overall factor (not per item)
&BucketRateLimiter{Limiter: rate.NewLimiter(rate.Limit(10), 100)},
)
}
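Editor-added usage sketch (not in the file): the default limiter is normally handed to a rate-limiting queue, as the new certificate rotation controller earlier in this diff does.
	q := workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "example")
	defer q.ShutDown()
	q.AddRateLimited("some-key") // requeued after 5ms, 10ms, 20ms, ... capped at 1000s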
// BucketRateLimiter adapts a standard bucket to the workqueue ratelimiter API
type BucketRateLimiter struct {
*rate.Limiter
}
var _ RateLimiter = &BucketRateLimiter{}
func (r *BucketRateLimiter) When(item interface{}) time.Duration {
return r.Limiter.Reserve().Delay()
}
func (r *BucketRateLimiter) NumRequeues(item interface{}) int {
return 0
}
func (r *BucketRateLimiter) Forget(item interface{}) {
}
// ItemBucketRateLimiter implements a workqueue ratelimiter API using standard rate.Limiter.
// Each key is using a separate limiter.
type ItemBucketRateLimiter struct {
r rate.Limit
burst int
limitersLock sync.Mutex
limiters map[interface{}]*rate.Limiter
}
var _ RateLimiter = &ItemBucketRateLimiter{}
// NewItemBucketRateLimiter creates new ItemBucketRateLimiter instance.
func NewItemBucketRateLimiter(r rate.Limit, burst int) *ItemBucketRateLimiter {
return &ItemBucketRateLimiter{
r: r,
burst: burst,
limiters: make(map[interface{}]*rate.Limiter),
}
}
// When returns a time.Duration which we need to wait before item is processed.
func (r *ItemBucketRateLimiter) When(item interface{}) time.Duration {
r.limitersLock.Lock()
defer r.limitersLock.Unlock()
limiter, ok := r.limiters[item]
if !ok {
limiter = rate.NewLimiter(r.r, r.burst)
r.limiters[item] = limiter
}
return limiter.Reserve().Delay()
}
// NumRequeues always returns 0 (does not apply to ItemBucketRateLimiter).
func (r *ItemBucketRateLimiter) NumRequeues(item interface{}) int {
return 0
}
// Forget removes item from the internal state.
func (r *ItemBucketRateLimiter) Forget(item interface{}) {
r.limitersLock.Lock()
defer r.limitersLock.Unlock()
delete(r.limiters, item)
}
// ItemExponentialFailureRateLimiter does a simple baseDelay*2^<num-failures> limit
// dealing with max failures and expiration is up to the caller
type ItemExponentialFailureRateLimiter struct {
failuresLock sync.Mutex
failures map[interface{}]int
baseDelay time.Duration
maxDelay time.Duration
}
var _ RateLimiter = &ItemExponentialFailureRateLimiter{}
func NewItemExponentialFailureRateLimiter(baseDelay time.Duration, maxDelay time.Duration) RateLimiter {
return &ItemExponentialFailureRateLimiter{
failures: map[interface{}]int{},
baseDelay: baseDelay,
maxDelay: maxDelay,
}
}
func DefaultItemBasedRateLimiter() RateLimiter {
return NewItemExponentialFailureRateLimiter(time.Millisecond, 1000*time.Second)
}
func (r *ItemExponentialFailureRateLimiter) When(item interface{}) time.Duration {
r.failuresLock.Lock()
defer r.failuresLock.Unlock()
exp := r.failures[item]
r.failures[item] = r.failures[item] + 1
// The backoff is capped such that 'calculated' value never overflows.
backoff := float64(r.baseDelay.Nanoseconds()) * math.Pow(2, float64(exp))
if backoff > math.MaxInt64 {
return r.maxDelay
}
calculated := time.Duration(backoff)
if calculated > r.maxDelay {
return r.maxDelay
}
return calculated
}
func (r *ItemExponentialFailureRateLimiter) NumRequeues(item interface{}) int {
r.failuresLock.Lock()
defer r.failuresLock.Unlock()
return r.failures[item]
}
func (r *ItemExponentialFailureRateLimiter) Forget(item interface{}) {
r.failuresLock.Lock()
defer r.failuresLock.Unlock()
delete(r.failures, item)
}
// ItemFastSlowRateLimiter does a quick retry for a certain number of attempts, then a slow retry after that
type ItemFastSlowRateLimiter struct {
failuresLock sync.Mutex
failures map[interface{}]int
maxFastAttempts int
fastDelay time.Duration
slowDelay time.Duration
}
var _ RateLimiter = &ItemFastSlowRateLimiter{}
func NewItemFastSlowRateLimiter(fastDelay, slowDelay time.Duration, maxFastAttempts int) RateLimiter {
return &ItemFastSlowRateLimiter{
failures: map[interface{}]int{},
fastDelay: fastDelay,
slowDelay: slowDelay,
maxFastAttempts: maxFastAttempts,
}
}
func (r *ItemFastSlowRateLimiter) When(item interface{}) time.Duration {
r.failuresLock.Lock()
defer r.failuresLock.Unlock()
r.failures[item] = r.failures[item] + 1
if r.failures[item] <= r.maxFastAttempts {
return r.fastDelay
}
return r.slowDelay
}
func (r *ItemFastSlowRateLimiter) NumRequeues(item interface{}) int {
r.failuresLock.Lock()
defer r.failuresLock.Unlock()
return r.failures[item]
}
func (r *ItemFastSlowRateLimiter) Forget(item interface{}) {
r.failuresLock.Lock()
defer r.failuresLock.Unlock()
delete(r.failures, item)
}
// MaxOfRateLimiter calls every RateLimiter and returns the worst case response
// When used with a token bucket limiter, the burst could be apparently exceeded in cases where particular items
// were separately delayed a longer time.
type MaxOfRateLimiter struct {
limiters []RateLimiter
}
func (r *MaxOfRateLimiter) When(item interface{}) time.Duration {
ret := time.Duration(0)
for _, limiter := range r.limiters {
curr := limiter.When(item)
if curr > ret {
ret = curr
}
}
return ret
}
func NewMaxOfRateLimiter(limiters ...RateLimiter) RateLimiter {
return &MaxOfRateLimiter{limiters: limiters}
}
func (r *MaxOfRateLimiter) NumRequeues(item interface{}) int {
ret := 0
for _, limiter := range r.limiters {
curr := limiter.NumRequeues(item)
if curr > ret {
ret = curr
}
}
return ret
}
func (r *MaxOfRateLimiter) Forget(item interface{}) {
for _, limiter := range r.limiters {
limiter.Forget(item)
}
}

View File

@ -0,0 +1,271 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package workqueue
import (
"container/heap"
"sync"
"time"
"k8s.io/apimachinery/pkg/util/clock"
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
)
// DelayingInterface is an Interface that can Add an item at a later time. This makes it easier to
// requeue items after failures without ending up in a hot-loop.
type DelayingInterface interface {
Interface
// AddAfter adds an item to the workqueue after the indicated duration has passed
AddAfter(item interface{}, duration time.Duration)
}
// NewDelayingQueue constructs a new workqueue with delayed queuing ability
func NewDelayingQueue() DelayingInterface {
return NewDelayingQueueWithCustomClock(clock.RealClock{}, "")
}
// NewNamedDelayingQueue constructs a new named workqueue with delayed queuing ability
func NewNamedDelayingQueue(name string) DelayingInterface {
return NewDelayingQueueWithCustomClock(clock.RealClock{}, name)
}
// NewDelayingQueueWithCustomClock constructs a new named workqueue
// with ability to inject real or fake clock for testing purposes
func NewDelayingQueueWithCustomClock(clock clock.Clock, name string) DelayingInterface {
ret := &delayingType{
Interface: NewNamed(name),
clock: clock,
heartbeat: clock.NewTicker(maxWait),
stopCh: make(chan struct{}),
waitingForAddCh: make(chan *waitFor, 1000),
metrics: newRetryMetrics(name),
}
go ret.waitingLoop()
return ret
}
// delayingType wraps an Interface and provides delayed re-enqueuing
type delayingType struct {
Interface
// clock tracks time for delayed firing
clock clock.Clock
// stopCh lets us signal a shutdown to the waiting loop
stopCh chan struct{}
// stopOnce guarantees we only signal shutdown a single time
stopOnce sync.Once
// heartbeat ensures we wait no more than maxWait before firing
heartbeat clock.Ticker
// waitingForAddCh is a buffered channel that feeds waitingForAdd
waitingForAddCh chan *waitFor
// metrics counts the number of retries
metrics retryMetrics
}
// waitFor holds the data to add and the time it should be added
type waitFor struct {
data t
readyAt time.Time
// index in the priority queue (heap)
index int
}
// waitForPriorityQueue implements a priority queue for waitFor items.
//
// waitForPriorityQueue implements heap.Interface. The item occurring next in
// time (i.e., the item with the smallest readyAt) is at the root (index 0).
// Peek returns this minimum item at index 0. Pop returns the minimum item after
// it has been removed from the queue and placed at index Len()-1 by
// container/heap. Push adds an item at index Len(), and container/heap
// percolates it into the correct location.
type waitForPriorityQueue []*waitFor
func (pq waitForPriorityQueue) Len() int {
return len(pq)
}
func (pq waitForPriorityQueue) Less(i, j int) bool {
return pq[i].readyAt.Before(pq[j].readyAt)
}
func (pq waitForPriorityQueue) Swap(i, j int) {
pq[i], pq[j] = pq[j], pq[i]
pq[i].index = i
pq[j].index = j
}
// Push adds an item to the queue. Push should not be called directly; instead,
// use `heap.Push`.
func (pq *waitForPriorityQueue) Push(x interface{}) {
n := len(*pq)
item := x.(*waitFor)
item.index = n
*pq = append(*pq, item)
}
// Pop removes an item from the queue. Pop should not be called directly;
// instead, use `heap.Pop`.
func (pq *waitForPriorityQueue) Pop() interface{} {
n := len(*pq)
item := (*pq)[n-1]
item.index = -1
*pq = (*pq)[0:(n - 1)]
return item
}
// Peek returns the item at the beginning of the queue, without removing the
// item or otherwise mutating the queue. It is safe to call directly.
func (pq waitForPriorityQueue) Peek() interface{} {
return pq[0]
}
// ShutDown stops the queue. After the queue drains, the returned shutdown bool
// on Get() will be true. This method may be invoked more than once.
func (q *delayingType) ShutDown() {
q.stopOnce.Do(func() {
q.Interface.ShutDown()
close(q.stopCh)
q.heartbeat.Stop()
})
}
// AddAfter adds the given item to the work queue after the given delay
func (q *delayingType) AddAfter(item interface{}, duration time.Duration) {
// don't add if we're already shutting down
if q.ShuttingDown() {
return
}
q.metrics.retry()
// immediately add things with no delay
if duration <= 0 {
q.Add(item)
return
}
select {
case <-q.stopCh:
// unblock if ShutDown() is called
case q.waitingForAddCh <- &waitFor{data: item, readyAt: q.clock.Now().Add(duration)}:
}
}
// maxWait keeps a max bound on the wait time. It's just insurance against weird things happening.
// Checking the queue every 10 seconds isn't expensive and we know that we'll never end up with an
// expired item sitting for more than 10 seconds.
const maxWait = 10 * time.Second
// waitingLoop runs until the workqueue is shutdown and keeps a check on the list of items to be added.
func (q *delayingType) waitingLoop() {
defer utilruntime.HandleCrash()
// Make a placeholder channel to use when there are no items in our list
never := make(<-chan time.Time)
// Make a timer that expires when the item at the head of the waiting queue is ready
var nextReadyAtTimer clock.Timer
waitingForQueue := &waitForPriorityQueue{}
heap.Init(waitingForQueue)
waitingEntryByData := map[t]*waitFor{}
for {
if q.Interface.ShuttingDown() {
return
}
now := q.clock.Now()
// Add ready entries
for waitingForQueue.Len() > 0 {
entry := waitingForQueue.Peek().(*waitFor)
if entry.readyAt.After(now) {
break
}
entry = heap.Pop(waitingForQueue).(*waitFor)
q.Add(entry.data)
delete(waitingEntryByData, entry.data)
}
// Set up a wait for the first item's readyAt (if one exists)
nextReadyAt := never
if waitingForQueue.Len() > 0 {
if nextReadyAtTimer != nil {
nextReadyAtTimer.Stop()
}
entry := waitingForQueue.Peek().(*waitFor)
nextReadyAtTimer = q.clock.NewTimer(entry.readyAt.Sub(now))
nextReadyAt = nextReadyAtTimer.C()
}
select {
case <-q.stopCh:
return
case <-q.heartbeat.C():
// continue the loop, which will add ready items
case <-nextReadyAt:
// continue the loop, which will add ready items
case waitEntry := <-q.waitingForAddCh:
if waitEntry.readyAt.After(q.clock.Now()) {
insert(waitingForQueue, waitingEntryByData, waitEntry)
} else {
q.Add(waitEntry.data)
}
drained := false
for !drained {
select {
case waitEntry := <-q.waitingForAddCh:
if waitEntry.readyAt.After(q.clock.Now()) {
insert(waitingForQueue, waitingEntryByData, waitEntry)
} else {
q.Add(waitEntry.data)
}
default:
drained = true
}
}
}
}
}
// insert adds the entry to the priority queue, or updates the readyAt if it already exists in the queue
func insert(q *waitForPriorityQueue, knownEntries map[t]*waitFor, entry *waitFor) {
// if the entry already exists, update the time only if it would cause the item to be queued sooner
existing, exists := knownEntries[entry.data]
if exists {
if existing.readyAt.After(entry.readyAt) {
existing.readyAt = entry.readyAt
heap.Fix(q, existing.index)
}
return
}
heap.Push(q, entry)
knownEntries[entry.data] = entry
}
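
Taken together, AddAfter and the waiting loop let callers schedule retries without spinning in a hot loop. A small sketch under the same vendored import path (Get, Done and ShutDown come from the embedded Interface defined in queue.go further below):

package main

import (
	"fmt"
	"time"

	"k8s.io/client-go/util/workqueue"
)

func main() {
	q := workqueue.NewDelayingQueue()
	defer q.ShutDown()

	q.AddAfter("retry-me", 2*time.Second) // becomes visible to Get roughly 2s from now
	item, shutdown := q.Get()             // blocks until the item is ready
	if !shutdown {
		fmt.Println("got", item)
		q.Done(item)
	}
}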

vendor/k8s.io/client-go/util/workqueue/doc.go generated vendored Normal file

@ -0,0 +1,26 @@
/*
Copyright 2014 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Package workqueue provides a simple queue that supports the following
// features:
// * Fair: items processed in the order in which they are added.
// * Stingy: a single item will not be processed multiple times concurrently,
// and if an item is added multiple times before it can be processed, it
// will only be processed once.
// * Multiple consumers and producers. In particular, it is allowed for an
// item to be reenqueued while it is being processed.
// * Shutdown notifications.
package workqueue // import "k8s.io/client-go/util/workqueue"
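
The "stingy" property is easy to observe: adding the same key twice before it is processed leaves a single queued copy. A brief sketch (the key is illustrative):

package main

import (
	"fmt"

	"k8s.io/client-go/util/workqueue"
)

func main() {
	q := workqueue.New()
	q.Add("node-1")
	q.Add("node-1")      // deduplicated against the dirty set
	fmt.Println(q.Len()) // 1

	item, _ := q.Get()
	q.Done(item)
	q.ShutDown()
}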

vendor/k8s.io/client-go/util/workqueue/metrics.go generated vendored Normal file

@ -0,0 +1,261 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package workqueue
import (
"sync"
"time"
"k8s.io/apimachinery/pkg/util/clock"
)
// This file provides abstractions for setting the provider (e.g., prometheus)
// of metrics.
type queueMetrics interface {
add(item t)
get(item t)
done(item t)
updateUnfinishedWork()
}
// GaugeMetric represents a single numerical value that can arbitrarily go up
// and down.
type GaugeMetric interface {
Inc()
Dec()
}
// SettableGaugeMetric represents a single numerical value that can arbitrarily go up
// and down. (Separate from GaugeMetric to preserve backwards compatibility.)
type SettableGaugeMetric interface {
Set(float64)
}
// CounterMetric represents a single numerical value that only ever
// goes up.
type CounterMetric interface {
Inc()
}
// SummaryMetric captures individual observations.
type SummaryMetric interface {
Observe(float64)
}
// HistogramMetric counts individual observations.
type HistogramMetric interface {
Observe(float64)
}
type noopMetric struct{}
func (noopMetric) Inc() {}
func (noopMetric) Dec() {}
func (noopMetric) Set(float64) {}
func (noopMetric) Observe(float64) {}
// defaultQueueMetrics expects the caller to lock before setting any metrics.
type defaultQueueMetrics struct {
clock clock.Clock
// current depth of a workqueue
depth GaugeMetric
// total number of adds handled by a workqueue
adds CounterMetric
// how long an item stays in a workqueue
latency HistogramMetric
// how long processing an item from a workqueue takes
workDuration HistogramMetric
addTimes map[t]time.Time
processingStartTimes map[t]time.Time
// how long have current threads been working?
unfinishedWorkSeconds SettableGaugeMetric
longestRunningProcessor SettableGaugeMetric
}
func (m *defaultQueueMetrics) add(item t) {
if m == nil {
return
}
m.adds.Inc()
m.depth.Inc()
if _, exists := m.addTimes[item]; !exists {
m.addTimes[item] = m.clock.Now()
}
}
func (m *defaultQueueMetrics) get(item t) {
if m == nil {
return
}
m.depth.Dec()
m.processingStartTimes[item] = m.clock.Now()
if startTime, exists := m.addTimes[item]; exists {
m.latency.Observe(m.sinceInSeconds(startTime))
delete(m.addTimes, item)
}
}
func (m *defaultQueueMetrics) done(item t) {
if m == nil {
return
}
if startTime, exists := m.processingStartTimes[item]; exists {
m.workDuration.Observe(m.sinceInSeconds(startTime))
delete(m.processingStartTimes, item)
}
}
func (m *defaultQueueMetrics) updateUnfinishedWork() {
// Note that a summary metric would be better for this, but prometheus
// doesn't seem to have non-hacky ways to reset the summary metrics.
var total float64
var oldest float64
for _, t := range m.processingStartTimes {
age := m.sinceInSeconds(t)
total += age
if age > oldest {
oldest = age
}
}
m.unfinishedWorkSeconds.Set(total)
m.longestRunningProcessor.Set(oldest)
}
type noMetrics struct{}
func (noMetrics) add(item t) {}
func (noMetrics) get(item t) {}
func (noMetrics) done(item t) {}
func (noMetrics) updateUnfinishedWork() {}
// Gets the time since the specified start in seconds.
func (m *defaultQueueMetrics) sinceInSeconds(start time.Time) float64 {
return m.clock.Since(start).Seconds()
}
type retryMetrics interface {
retry()
}
type defaultRetryMetrics struct {
retries CounterMetric
}
func (m *defaultRetryMetrics) retry() {
if m == nil {
return
}
m.retries.Inc()
}
// MetricsProvider generates various metrics used by the queue.
type MetricsProvider interface {
NewDepthMetric(name string) GaugeMetric
NewAddsMetric(name string) CounterMetric
NewLatencyMetric(name string) HistogramMetric
NewWorkDurationMetric(name string) HistogramMetric
NewUnfinishedWorkSecondsMetric(name string) SettableGaugeMetric
NewLongestRunningProcessorSecondsMetric(name string) SettableGaugeMetric
NewRetriesMetric(name string) CounterMetric
}
type noopMetricsProvider struct{}
func (_ noopMetricsProvider) NewDepthMetric(name string) GaugeMetric {
return noopMetric{}
}
func (_ noopMetricsProvider) NewAddsMetric(name string) CounterMetric {
return noopMetric{}
}
func (_ noopMetricsProvider) NewLatencyMetric(name string) HistogramMetric {
return noopMetric{}
}
func (_ noopMetricsProvider) NewWorkDurationMetric(name string) HistogramMetric {
return noopMetric{}
}
func (_ noopMetricsProvider) NewUnfinishedWorkSecondsMetric(name string) SettableGaugeMetric {
return noopMetric{}
}
func (_ noopMetricsProvider) NewLongestRunningProcessorSecondsMetric(name string) SettableGaugeMetric {
return noopMetric{}
}
func (_ noopMetricsProvider) NewRetriesMetric(name string) CounterMetric {
return noopMetric{}
}
var globalMetricsFactory = queueMetricsFactory{
metricsProvider: noopMetricsProvider{},
}
type queueMetricsFactory struct {
metricsProvider MetricsProvider
onlyOnce sync.Once
}
func (f *queueMetricsFactory) setProvider(mp MetricsProvider) {
f.onlyOnce.Do(func() {
f.metricsProvider = mp
})
}
func (f *queueMetricsFactory) newQueueMetrics(name string, clock clock.Clock) queueMetrics {
mp := f.metricsProvider
if len(name) == 0 || mp == (noopMetricsProvider{}) {
return noMetrics{}
}
return &defaultQueueMetrics{
clock: clock,
depth: mp.NewDepthMetric(name),
adds: mp.NewAddsMetric(name),
latency: mp.NewLatencyMetric(name),
workDuration: mp.NewWorkDurationMetric(name),
unfinishedWorkSeconds: mp.NewUnfinishedWorkSecondsMetric(name),
longestRunningProcessor: mp.NewLongestRunningProcessorSecondsMetric(name),
addTimes: map[t]time.Time{},
processingStartTimes: map[t]time.Time{},
}
}
func newRetryMetrics(name string) retryMetrics {
var ret *defaultRetryMetrics
if len(name) == 0 {
return ret
}
return &defaultRetryMetrics{
retries: globalMetricsFactory.metricsProvider.NewRetriesMetric(name),
}
}
// SetProvider sets the metrics provider for all subsequently created work
// queues. Only the first call has an effect.
func SetProvider(metricsProvider MetricsProvider) {
globalMetricsFactory.setProvider(metricsProvider)
}
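
SetProvider only affects queues created afterwards, and only the first call wins. A hypothetical provider that satisfies the interface with a single metric type (the names and the no-op behaviour are illustrative, not part of the library):

package main

import (
	"fmt"
	"sync/atomic"

	"k8s.io/client-go/util/workqueue"
)

// simpleMetric satisfies GaugeMetric, SettableGaugeMetric, CounterMetric and HistogramMetric.
type simpleMetric struct{ v int64 }

func (m *simpleMetric) Inc()            { atomic.AddInt64(&m.v, 1) }
func (m *simpleMetric) Dec()            { atomic.AddInt64(&m.v, -1) }
func (m *simpleMetric) Set(float64)     {}
func (m *simpleMetric) Observe(float64) {}

type simpleProvider struct{}

func (simpleProvider) NewDepthMetric(name string) workqueue.GaugeMetric            { return &simpleMetric{} }
func (simpleProvider) NewAddsMetric(name string) workqueue.CounterMetric           { return &simpleMetric{} }
func (simpleProvider) NewLatencyMetric(name string) workqueue.HistogramMetric      { return &simpleMetric{} }
func (simpleProvider) NewWorkDurationMetric(name string) workqueue.HistogramMetric { return &simpleMetric{} }
func (simpleProvider) NewUnfinishedWorkSecondsMetric(name string) workqueue.SettableGaugeMetric {
	return &simpleMetric{}
}
func (simpleProvider) NewLongestRunningProcessorSecondsMetric(name string) workqueue.SettableGaugeMetric {
	return &simpleMetric{}
}
func (simpleProvider) NewRetriesMetric(name string) workqueue.CounterMetric { return &simpleMetric{} }

func main() {
	workqueue.SetProvider(simpleProvider{}) // must run before the first named queue is constructed
	q := workqueue.NewNamed("demo")         // unnamed queues skip metrics entirely
	q.Add("item")
	fmt.Println(q.Len())
	q.ShutDown()
}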

vendor/k8s.io/client-go/util/workqueue/parallelizer.go generated vendored Normal file

@ -0,0 +1,63 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package workqueue
import (
"context"
"sync"
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
)
type DoWorkPieceFunc func(piece int)
// ParallelizeUntil is a framework that allows for parallelizing N
// independent pieces of work until done or the context is canceled.
func ParallelizeUntil(ctx context.Context, workers, pieces int, doWorkPiece DoWorkPieceFunc) {
var stop <-chan struct{}
if ctx != nil {
stop = ctx.Done()
}
toProcess := make(chan int, pieces)
for i := 0; i < pieces; i++ {
toProcess <- i
}
close(toProcess)
if pieces < workers {
workers = pieces
}
wg := sync.WaitGroup{}
wg.Add(workers)
for i := 0; i < workers; i++ {
go func() {
defer utilruntime.HandleCrash()
defer wg.Done()
for piece := range toProcess {
select {
case <-stop:
return
default:
doWorkPiece(piece)
}
}
}()
}
wg.Wait()
}
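
Each worker drains the shared channel of piece indices, so at most `workers` goroutines run at once and every index is handled exactly once unless the context is canceled first. A short sketch:

package main

import (
	"context"
	"fmt"

	"k8s.io/client-go/util/workqueue"
)

func main() {
	squares := make([]int, 100)
	workqueue.ParallelizeUntil(context.TODO(), 8, len(squares), func(i int) {
		squares[i] = i * i // each piece touches a distinct index, so no extra locking is needed
	})
	fmt.Println(squares[99]) // 9801
}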

vendor/k8s.io/client-go/util/workqueue/queue.go generated vendored Normal file

@ -0,0 +1,212 @@
/*
Copyright 2015 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package workqueue
import (
"sync"
"time"
"k8s.io/apimachinery/pkg/util/clock"
)
type Interface interface {
Add(item interface{})
Len() int
Get() (item interface{}, shutdown bool)
Done(item interface{})
ShutDown()
ShuttingDown() bool
}
// New constructs a new work queue (see the package comment).
func New() *Type {
return NewNamed("")
}
func NewNamed(name string) *Type {
rc := clock.RealClock{}
return newQueue(
rc,
globalMetricsFactory.newQueueMetrics(name, rc),
defaultUnfinishedWorkUpdatePeriod,
)
}
func newQueue(c clock.Clock, metrics queueMetrics, updatePeriod time.Duration) *Type {
t := &Type{
clock: c,
dirty: set{},
processing: set{},
cond: sync.NewCond(&sync.Mutex{}),
metrics: metrics,
unfinishedWorkUpdatePeriod: updatePeriod,
}
go t.updateUnfinishedWorkLoop()
return t
}
const defaultUnfinishedWorkUpdatePeriod = 500 * time.Millisecond
// Type is a work queue (see the package comment).
type Type struct {
// queue defines the order in which we will work on items. Every
// element of queue should be in the dirty set and not in the
// processing set.
queue []t
// dirty defines all of the items that need to be processed.
dirty set
// Things that are currently being processed are in the processing set.
// These things may be simultaneously in the dirty set. When we finish
// processing something and remove it from this set, we'll check if
// it's in the dirty set, and if so, add it to the queue.
processing set
cond *sync.Cond
shuttingDown bool
metrics queueMetrics
unfinishedWorkUpdatePeriod time.Duration
clock clock.Clock
}
type empty struct{}
type t interface{}
type set map[t]empty
func (s set) has(item t) bool {
_, exists := s[item]
return exists
}
func (s set) insert(item t) {
s[item] = empty{}
}
func (s set) delete(item t) {
delete(s, item)
}
// Add marks item as needing processing.
func (q *Type) Add(item interface{}) {
q.cond.L.Lock()
defer q.cond.L.Unlock()
if q.shuttingDown {
return
}
if q.dirty.has(item) {
return
}
q.metrics.add(item)
q.dirty.insert(item)
if q.processing.has(item) {
return
}
q.queue = append(q.queue, item)
q.cond.Signal()
}
// Len returns the current queue length, for informational purposes only. You
// shouldn't e.g. gate a call to Add() or Get() on Len() being a particular
// value; that can't be synchronized properly.
func (q *Type) Len() int {
q.cond.L.Lock()
defer q.cond.L.Unlock()
return len(q.queue)
}
// Get blocks until it can return an item to be processed. If shutdown = true,
// the caller should end their goroutine. You must call Done with item when you
// have finished processing it.
func (q *Type) Get() (item interface{}, shutdown bool) {
q.cond.L.Lock()
defer q.cond.L.Unlock()
for len(q.queue) == 0 && !q.shuttingDown {
q.cond.Wait()
}
if len(q.queue) == 0 {
// We must be shutting down.
return nil, true
}
item, q.queue = q.queue[0], q.queue[1:]
q.metrics.get(item)
q.processing.insert(item)
q.dirty.delete(item)
return item, false
}
// Done marks item as done processing, and if it has been marked as dirty again
// while it was being processed, it will be re-added to the queue for
// re-processing.
func (q *Type) Done(item interface{}) {
q.cond.L.Lock()
defer q.cond.L.Unlock()
q.metrics.done(item)
q.processing.delete(item)
if q.dirty.has(item) {
q.queue = append(q.queue, item)
q.cond.Signal()
}
}
// ShutDown will cause q to ignore all new items added to it. As soon as the
// worker goroutines have drained the existing items in the queue, they will be
// instructed to exit.
func (q *Type) ShutDown() {
q.cond.L.Lock()
defer q.cond.L.Unlock()
q.shuttingDown = true
q.cond.Broadcast()
}
func (q *Type) ShuttingDown() bool {
q.cond.L.Lock()
defer q.cond.L.Unlock()
return q.shuttingDown
}
func (q *Type) updateUnfinishedWorkLoop() {
t := q.clock.NewTicker(q.unfinishedWorkUpdatePeriod)
defer t.Stop()
for range t.C() {
if !func() bool {
q.cond.L.Lock()
defer q.cond.L.Unlock()
if !q.shuttingDown {
q.metrics.updateUnfinishedWork()
return true
}
return false
}() {
return
}
}
}
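
The Get/Done pair is what enforces the "stingy" guarantee documented in doc.go: an item re-added while it is being processed is requeued only once Done is called. A typical worker-loop sketch (the sleep is only there to let the toy worker drain the queue):

package main

import (
	"fmt"
	"time"

	"k8s.io/client-go/util/workqueue"
)

func main() {
	q := workqueue.New()

	go func() {
		for {
			item, shutdown := q.Get()
			if shutdown {
				return
			}
			fmt.Println("processing", item)
			q.Done(item) // without Done, a re-added item would never make it back onto the queue
		}
	}()

	q.Add("pod-a")
	q.Add("pod-b")
	time.Sleep(100 * time.Millisecond)
	q.ShutDown()
}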


@ -0,0 +1,69 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package workqueue
// RateLimitingInterface is an interface that rate limits items being added to the queue.
type RateLimitingInterface interface {
DelayingInterface
// AddRateLimited adds an item to the workqueue after the rate limiter says it's ok
AddRateLimited(item interface{})
// Forget indicates that an item is finished being retried. Doesn't matter whether it's for perm failing
// or for success, we'll stop the rate limiter from tracking it. This only clears the `rateLimiter`, you
// still have to call `Done` on the queue.
Forget(item interface{})
// NumRequeues returns how many times the item has been requeued
NumRequeues(item interface{}) int
}
// NewRateLimitingQueue constructs a new workqueue with rateLimited queuing ability
// Remember to call Forget! If you don't, you may end up tracking failures forever.
func NewRateLimitingQueue(rateLimiter RateLimiter) RateLimitingInterface {
return &rateLimitingType{
DelayingInterface: NewDelayingQueue(),
rateLimiter: rateLimiter,
}
}
func NewNamedRateLimitingQueue(rateLimiter RateLimiter, name string) RateLimitingInterface {
return &rateLimitingType{
DelayingInterface: NewNamedDelayingQueue(name),
rateLimiter: rateLimiter,
}
}
// rateLimitingType wraps an Interface and provides rate-limited re-enqueuing
type rateLimitingType struct {
DelayingInterface
rateLimiter RateLimiter
}
// AddRateLimited AddAfter's the item based on the time when the rate limiter says it's ok
func (q *rateLimitingType) AddRateLimited(item interface{}) {
q.DelayingInterface.AddAfter(item, q.rateLimiter.When(item))
}
func (q *rateLimitingType) NumRequeues(item interface{}) int {
return q.rateLimiter.NumRequeues(item)
}
func (q *rateLimitingType) Forget(item interface{}) {
q.rateLimiter.Forget(item)
}
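
Combining the rate limiter with the delaying queue gives the usual controller retry pattern. A hedged sketch of such a loop (the key, the simulated handler and the retry cap are placeholders):

package main

import (
	"fmt"
	"time"

	"k8s.io/client-go/util/workqueue"
)

func main() {
	rq := workqueue.NewNamedRateLimitingQueue(
		workqueue.NewItemFastSlowRateLimiter(time.Millisecond, time.Second, 5), "demo")
	defer rq.ShutDown()

	rq.Add("object-key")
	key, shutdown := rq.Get()
	if shutdown {
		return
	}
	defer rq.Done(key)

	process := func(k interface{}) error { return fmt.Errorf("simulated failure for %v", k) }
	if err := process(key); err != nil {
		if rq.NumRequeues(key) < 5 {
			rq.AddRateLimited(key) // retry after the limiter's per-item delay
		} else {
			rq.Forget(key) // give up; stop tracking failures for this key
		}
	} else {
		rq.Forget(key) // success: clear the limiter's history, as the comments above require
	}
}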

vendor/k8s.io/kubernetes/go.mod generated vendored

@ -38,6 +38,7 @@ require (
github.com/containerd/typeurl v0.0.0-20190228175220-2a93cfde8c20 // indirect
github.com/containernetworking/cni v0.7.1
github.com/coredns/corefile-migration v1.0.6
github.com/coreos/go-oidc v2.1.0+incompatible
github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e
github.com/coreos/pkg v0.0.0-20180108230652-97fdf19511ea
github.com/cpuguy83/go-md2man v1.0.10
@ -164,7 +165,7 @@ require (
k8s.io/repo-infra v0.0.1-alpha.1
k8s.io/sample-apiserver v0.0.0
k8s.io/system-validators v1.0.4
k8s.io/utils v0.0.0-20200117235808-5f6fbceb4c31
k8s.io/utils v0.0.0-20200324210504-a9aa75ae1b89
sigs.k8s.io/kustomize v2.0.3+incompatible
sigs.k8s.io/yaml v1.2.0
)
@ -563,7 +564,7 @@ replace (
k8s.io/sample-cli-plugin => ./staging/src/k8s.io/sample-cli-plugin
k8s.io/sample-controller => ./staging/src/k8s.io/sample-controller
k8s.io/system-validators => k8s.io/system-validators v1.0.4
k8s.io/utils => k8s.io/utils v0.0.0-20200117235808-5f6fbceb4c31
k8s.io/utils => k8s.io/utils v0.0.0-20200324210504-a9aa75ae1b89
modernc.org/cc => modernc.org/cc v1.0.0
modernc.org/golex => modernc.org/golex v1.0.0
modernc.org/mathutil => modernc.org/mathutil v1.0.0
@ -573,9 +574,9 @@ replace (
mvdan.cc/lint => mvdan.cc/lint v0.0.0-20170908181259-adc824a0674b
mvdan.cc/unparam => mvdan.cc/unparam v0.0.0-20190209190245-fbb59629db34
rsc.io/pdf => rsc.io/pdf v0.1.1
sigs.k8s.io/apiserver-network-proxy/konnectivity-client => sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.5
sigs.k8s.io/apiserver-network-proxy/konnectivity-client => sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.7
sigs.k8s.io/kustomize => sigs.k8s.io/kustomize v2.0.3+incompatible
sigs.k8s.io/structured-merge-diff/v3 => sigs.k8s.io/structured-merge-diff/v3 v3.0.0-20200207200219-5e70324e7c1c
sigs.k8s.io/structured-merge-diff/v3 => sigs.k8s.io/structured-merge-diff/v3 v3.0.0
sigs.k8s.io/yaml => sigs.k8s.io/yaml v1.2.0
sourcegraph.com/sqs/pbtypes => sourcegraph.com/sqs/pbtypes v0.0.0-20180604144634-d3ebe8f20ae4
vbom.ml/util => vbom.ml/util v0.0.0-20160121211510-db5cfe13f5cc


@ -302,7 +302,6 @@ type PersistentVolumeSpec struct {
MountOptions []string
// volumeMode defines if a volume is intended to be used with a formatted filesystem
// or to remain in raw block state. Value of Filesystem is implied when not included in spec.
// This is a beta feature.
// +optional
VolumeMode *PersistentVolumeMode
// NodeAffinity defines constraints that limit what nodes this volume can be accessed from.
@ -416,17 +415,17 @@ type PersistentVolumeClaimSpec struct {
StorageClassName *string
// volumeMode defines what type of volume is required by the claim.
// Value of Filesystem is implied when not included in claim spec.
// This is a beta feature.
// +optional
VolumeMode *PersistentVolumeMode
// This field can be used to specify either:
// * An existing VolumeSnapshot object (snapshot.storage.k8s.io/VolumeSnapshot)
// * An existing VolumeSnapshot object (snapshot.storage.k8s.io/VolumeSnapshot - Beta)
// * An existing PVC (PersistentVolumeClaim)
// In order to use either of these DataSource types, the appropriate feature gate
// must be enabled (VolumeSnapshotDataSource, VolumePVCDataSource)
// If the provisioner can support the specified data source, it will create
// a new volume based on the contents of the specified PVC or Snapshot.
// If the provisioner does not support the specified data source, the volume will
// * An existing custom resource/object that implements data population (Alpha)
// In order to use VolumeSnapshot object types, the appropriate feature gate
// must be enabled (VolumeSnapshotDataSource or AnyVolumeDataSource)
// If the provisioner or an external controller can support the specified data source,
// it will create a new volume based on the contents of the specified data source.
// If the specified data source is not supported, the volume will
// not be created and the failure will be reported as an event.
// In the future, we plan to support more data source types and the behavior
// of the provisioner may change.
@ -2057,7 +2056,6 @@ type Container struct {
// +optional
VolumeMounts []VolumeMount
// volumeDevices is the list of block devices to be used by the container.
// This is a beta feature.
// +optional
VolumeDevices []VolumeDevice
// +optional
@ -2787,6 +2785,22 @@ type Sysctl struct {
Value string
}
// PodFSGroupChangePolicy holds policies that will be used for applying fsGroup to a volume
// when volume is mounted.
type PodFSGroupChangePolicy string
const (
// FSGroupChangeOnRootMismatch indicates that volume's ownership and permissions will be changed
// only when permission and ownership of root directory does not match with expected
// permissions on the volume. This can help shorten the time it takes to change
// ownership and permissions of a volume.
FSGroupChangeOnRootMismatch PodFSGroupChangePolicy = "OnRootMismatch"
// FSGroupChangeAlways indicates that volume's ownership and permissions
// should always be changed whenever volume is mounted inside a Pod. This is the default
// behavior.
FSGroupChangeAlways PodFSGroupChangePolicy = "Always"
)
// PodSecurityContext holds pod-level security attributes and common container settings.
// Some fields are also present in container.securityContext. Field values of
// container.securityContext take precedence over field values of PodSecurityContext.
@ -2866,6 +2880,14 @@ type PodSecurityContext struct {
// If unset, the Kubelet will not modify the ownership and permissions of any volume.
// +optional
FSGroup *int64
// fsGroupChangePolicy defines behavior of changing ownership and permission of the volume
// before being exposed inside Pod. This field will only apply to
// volume types which support fsGroup based ownership(and permissions).
// It will have no effect on ephemeral volume types such as: secret, configmaps
// and emptydir.
// Valid values are "OnRootMismatch" and "Always". If not specified defaults to "Always".
// +optional
FSGroupChangePolicy *PodFSGroupChangePolicy
// Sysctls hold a list of namespaced sysctls used for the pod. Pods with unsupported
// sysctls (by the container runtime) might fail to launch.
// +optional
@ -2969,7 +2991,6 @@ type EphemeralContainerCommon struct {
// +optional
VolumeMounts []VolumeMount
// volumeDevices is the list of block devices to be used by the container.
// This is a beta feature.
// +optional
VolumeDevices []VolumeDevice
// Probes are not allowed for ephemeral containers.
@ -5096,14 +5117,12 @@ type SELinuxOptions struct {
// WindowsSecurityContextOptions contain Windows-specific options and credentials.
type WindowsSecurityContextOptions struct {
// GMSACredentialSpecName is the name of the GMSA credential spec to use.
// This field is alpha-level and is only honored by servers that enable the WindowsGMSA feature flag.
// +optional
GMSACredentialSpecName *string
// GMSACredentialSpec is where the GMSA admission webhook
// (https://github.com/kubernetes-sigs/windows-gmsa) inlines the contents of the
// GMSA credential spec named by the GMSACredentialSpecName field.
// This field is alpha-level and is only honored by servers that enable the WindowsGMSA feature flag.
// +optional
GMSACredentialSpec *string
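
The new fsGroupChangePolicy field above is set per pod. A hypothetical sketch against the external k8s.io/api/core/v1 types, assuming they mirror the internal field and constant names shown in this diff (the pod and the fsGroup value are placeholders):

package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
)

func main() {
	fsGroup := int64(2000)
	policy := corev1.FSGroupChangeOnRootMismatch // only fix ownership when the volume root does not match

	pod := &corev1.Pod{}
	pod.Spec.SecurityContext = &corev1.PodSecurityContext{
		FSGroup:             &fsGroup,
		FSGroupChangePolicy: &policy,
	}
	fmt.Println(*pod.Spec.SecurityContext.FSGroupChangePolicy)
}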


@ -3691,6 +3691,11 @@ func (in *PodSecurityContext) DeepCopyInto(out *PodSecurityContext) {
*out = new(int64)
**out = **in
}
if in.FSGroupChangePolicy != nil {
in, out := &in.FSGroupChangePolicy, &out.FSGroupChangePolicy
*out = new(PodFSGroupChangePolicy)
**out = **in
}
if in.Sysctls != nil {
in, out := &in.Sysctls, &out.Sysctls
*out = make([]Sysctl, len(*in))


@ -24,6 +24,7 @@ import (
"encoding/hex"
"fmt"
"net"
"regexp"
"strings"
"k8s.io/apimachinery/pkg/api/resource"
@ -33,6 +34,11 @@ import (
"k8s.io/klog"
)
var (
classShowMatcher = regexp.MustCompile(`class htb (1:\d+)`)
classAndHandleMatcher = regexp.MustCompile(`filter parent 1:.*fh (\d+::\d+).*flowid (\d+:\d+)`)
)
// tcShaper provides an implementation of the Shaper interface on Linux using the 'tc' tool.
// In general, using this requires that the caller possess the NET_CAP_ADMIN capability, though if you
// do this within a container, it only requires the NS_CAPABLE capability for manipulations to that
@ -75,13 +81,13 @@ func (t *tcShaper) nextClassID() (int, error) {
if len(line) == 0 {
continue
}
parts := strings.Split(line, " ")
// expected tc line:
// class htb 1:1 root prio 0 rate 1000Kbit ceil 1000Kbit burst 1600b cburst 1600b
if len(parts) != 14 {
return -1, fmt.Errorf("unexpected output from tc: %s (%v)", scanner.Text(), parts)
matches := classShowMatcher.FindStringSubmatch(line)
if len(matches) != 2 {
return -1, fmt.Errorf("unexpected output from tc: %s (%v)", scanner.Text(), matches)
}
classes.Insert(parts[2])
classes.Insert(matches[1])
}
// Make sure it doesn't go forever
@ -153,13 +159,14 @@ func (t *tcShaper) findCIDRClass(cidr string) (classAndHandleList [][]string, fo
continue
}
if strings.Contains(line, spec) {
parts := strings.Split(filter, " ")
// expected tc line:
// filter parent 1: protocol ip pref 1 u32 fh 800::800 order 2048 key ht 800 bkt 0 flowid 1:1
if len(parts) != 19 {
return classAndHandleList, false, fmt.Errorf("unexpected output from tc: %s %d (%v)", filter, len(parts), parts)
// `filter parent 1: protocol ip pref 1 u32 fh 800::800 order 2048 key ht 800 bkt 0 flowid 1:1` (old version) or
// `filter parent 1: protocol ip pref 1 u32 chain 0 fh 800::800 order 2048 key ht 800 bkt 0 flowid 1:1 not_in_hw` (new version)
matches := classAndHandleMatcher.FindStringSubmatch(filter)
if len(matches) != 3 {
return classAndHandleList, false, fmt.Errorf("unexpected output from tc: %s %d (%v)", filter, len(matches), matches)
}
resultTmp := []string{parts[18], parts[9]}
resultTmp := []string{matches[2], matches[1]}
classAndHandleList = append(classAndHandleList, resultTmp)
}
}
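
The switch from positional strings.Split parsing to regular expressions makes findCIDRClass tolerant of both tc output formats quoted in the comment above. A standalone sketch that exercises the same classAndHandleMatcher pattern against the two sample lines (the samples are copied from that comment):

package main

import (
	"fmt"
	"regexp"
)

var classAndHandleMatcher = regexp.MustCompile(`filter parent 1:.*fh (\d+::\d+).*flowid (\d+:\d+)`)

func main() {
	lines := []string{
		"filter parent 1: protocol ip pref 1 u32 fh 800::800 order 2048 key ht 800 bkt 0 flowid 1:1",
		"filter parent 1: protocol ip pref 1 u32 chain 0 fh 800::800 order 2048 key ht 800 bkt 0 flowid 1:1 not_in_hw",
	}
	for _, line := range lines {
		m := classAndHandleMatcher.FindStringSubmatch(line)
		// Both the old and the new tc output yield handle 800::800 and flowid 1:1.
		fmt.Println(m[1], m[2])
	}
}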
@ -301,7 +308,6 @@ func (t *tcShaper) Reset(cidr string) error {
}
}
return nil
}
func (t *tcShaper) deleteInterface(class string) error {