Merge pull request #2233 from Random-Liu/update-cri

Update cri to v1.0.0-rc.0
Commit ea37521fda by Derek McGowan, 2018-03-27 00:06:34 -07:00 (committed by GitHub)
177 changed files with 11092 additions and 5689 deletions

View File

@@ -74,7 +74,8 @@ script:
 - if [ "$GOOS" = "linux" ]; then sudo PATH=$PATH GOPATH=$GOPATH TESTFLAGS_PARALLEL=1 make integration ; fi
 - if [ "$GOOS" = "linux" ]; then
     sudo PATH=$PATH containerd -log-level debug &> /tmp/containerd-cri.log &
-    sudo PATH=$PATH GOPATH=$GOPATH critest --runtime-endpoint=/var/run/containerd/containerd.sock --ginkgo-flags=--nodes=8 validation ;
+    sudo ctr version ;
+    sudo PATH=$PATH GOPATH=$GOPATH critest --runtime-endpoint=/var/run/containerd/containerd.sock --parallel=8 ;
     exit_code=$? ;
     test $exit_code -ne 0 && cat /tmp/containerd-cri.log ;
     sudo pkill containerd ;

View File

@@ -127,6 +127,8 @@ func testDaemonRuntimeRoot(t *testing.T, noShim bool) {
 [plugins.linux]
   no_shim = %v
   runtime_root = "%s"
+[plugins.cri]
+  stream_server_port = "0"
 `, noShim, runtimeRoot)
 	client, _, cleanup := newDaemonWithConfig(t, configTOML)

View File

@@ -42,6 +42,7 @@ bash -c 'cat >'$CNI_CONFIG_DIR'/10-containerd-net.conflist <<EOF
       "bridge": "cni0",
       "isGateway": true,
       "ipMasq": true,
+      "promiscMode": true,
       "ipam": {
         "type": "host-local",
         "subnet": "10.88.0.0/16",

View File

@@ -20,7 +20,8 @@
 #
 set -eu -o pipefail
-CRITEST_COMMIT=b184f9aefe60a4441330e615ee20634ee26474fb
+go get -u github.com/onsi/ginkgo/ginkgo
+CRITEST_COMMIT=207e773f72fde8d8aed1447692d8f800a6686d6c
 go get -d github.com/kubernetes-incubator/cri-tools/...
 cd $GOPATH/src/github.com/kubernetes-incubator/cri-tools
 git checkout $CRITEST_COMMIT

View File

@@ -43,17 +43,16 @@ github.com/gotestyourself/gotestyourself 44dbf532bbf5767611f6f2a61bded572e337010
 github.com/google/go-cmp v0.1.0
 # cri dependencies
-github.com/containerd/cri fd18145c4b01fffff53cbf350012abe7ff83ebe9 https://github.com/dmcgowan/cri-containerd
+github.com/containerd/cri v1.0.0-rc.0
+github.com/containerd/go-cni f2d7272f12d045b16ed924f50e91f9f9cecc55a7
 github.com/blang/semver v3.1.0
 github.com/containernetworking/cni v0.6.0
-github.com/containernetworking/plugins v0.6.0
+github.com/containernetworking/plugins v0.7.0
-github.com/cri-o/ocicni 9b451e26eb7c694d564991fbf44f77d0afb9b03c
 github.com/davecgh/go-spew v1.1.0
 github.com/docker/distribution b38e5838b7b2f2ad48e06ec4b500011976080621
 github.com/docker/docker 86f080cff0914e9694068ed78d503701667c4c00
 github.com/docker/spdystream 449fdfce4d962303d702fec724ef0ad181c92528
 github.com/emicklei/go-restful ff4f55a206334ef123e4f79bbf348980da81ca46
-github.com/fsnotify/fsnotify 7d7316ed6e1ed2de075aab8dfc76de5d158d66e1
 github.com/ghodss/yaml 73d445a93680fa1a78ae23a5839bad48f32ba1ee
 github.com/golang/glog 44145f04b68cf362d9c4df2182967c2275eaefed
 github.com/google/gofuzz 44d81051d367757e1c7c6a5a86423ece9afcf63c
@@ -65,14 +64,15 @@ github.com/opencontainers/selinux 4a2974bf1ee960774ffd517717f1f45325af0206
 github.com/seccomp/libseccomp-golang 32f571b70023028bd57d9288c20efbcb237f3ce0
 github.com/spf13/pflag v1.0.0
 github.com/tchap/go-patricia 5ad6cdb7538b0097d5598c7e57f0a24072adf7dc
+golang.org/x/crypto 49796115aa4b964c318aad4f3084fdb41e9aa067
 golang.org/x/time f51c12702a4d776e4c1fa9b0fabab841babae631
 gopkg.in/inf.v0 3887ee99ecf07df5b447e9b00d9c0b2adaa9f3e4
 gopkg.in/yaml.v2 53feefa2559fb8dfa8d81baad31be332c97d6c77
-k8s.io/api a1d6dce6736a6c75929bb75111e89077e35a5856
-k8s.io/apimachinery 8259d997cf059cd83dc47e5f8074b7a7d7967c09
-k8s.io/apiserver 8e45eac9dff86447a5c2effe6a3d2cba70121ebf
-k8s.io/client-go 33bd23f75b6de861994706a322b0afab824b2171
-k8s.io/kubernetes 05944b1d2ca7f60b09762a330425108f48f6b603
+k8s.io/api 5584376ceeffeb13a2e98b5e9f0e9dab37de4bab
+k8s.io/apimachinery fcb9a12f7875d01f8390b28faedc37dcf2e713b9
+k8s.io/apiserver 837069aa36757a586e4a8165f1ff5ca06170aa4a
+k8s.io/client-go 484f27892430b961df38fe6715cc396409207d9f
+k8s.io/kubernetes v1.10.0-rc.1
 k8s.io/utils 258e2a2fa64568210fbd6267cf1d8fd87c3cb86e
 # zfs dependencies

View File

@@ -1,31 +1,43 @@
-# cri-containerd
+# cri
 <p align="center">
 <img src="https://github.com/kubernetes/kubernetes/blob/master/logo/logo.png" width="50" height="50">
 <img src="https://github.com/containerd/containerd/blob/master/docs/images/containerd-dark.png" width="200" >
 </p>
-[![Build Status](https://api.travis-ci.org/containerd/cri-containerd.svg?style=flat-square)](https://travis-ci.org/containerd/cri-containerd)
-[![Go Report Card](https://goreportcard.com/badge/github.com/containerd/cri-containerd?style=flat-square)](https://goreportcard.com/report/github.com/containerd/cri-containerd)
+*Note: The standalone `cri-containerd` binary is end-of-life. `cri-containerd` is
+transitioning from a standalone binary that talks to containerd to a plugin within
+containerd. This github branch is for the `cri` plugin. See
+[standalone-cri-containerd branch](https://github.com/containerd/cri/tree/standalone-cri-containerd)
+for information about the standalone version of `cri-containerd`.*
-`cri-containerd` is a [containerd](https://containerd.io/) based implementation of Kubernetes [container runtime interface (CRI)](https://github.com/kubernetes/kubernetes/blob/master/pkg/kubelet/apis/cri/runtime/v1alpha2/api.proto).
+*Note: You need to [drain your node](https://kubernetes.io/docs/tasks/administer-cluster/safely-drain-node/) before upgrading from standalone `cri-containerd` to containerd with `cri` plugin.*
+[![Build Status](https://api.travis-ci.org/containerd/cri.svg?style=flat-square)](https://travis-ci.org/containerd/cri)
+[![Go Report Card](https://goreportcard.com/badge/github.com/containerd/cri)](https://goreportcard.com/report/github.com/containerd/cri)
+`cri` is a [containerd](https://containerd.io/) plugin implementation of Kubernetes [container runtime interface (CRI)](https://github.com/kubernetes/kubernetes/blob/master/pkg/kubelet/apis/cri/runtime/v1alpha2/api.proto).
 With it, you could run Kubernetes using containerd as the container runtime.
-![cri-containerd](./docs/cri-containerd.png)
+![cri](./docs/cri.png)
 ## Current Status
-`cri-containerd` is in beta:
+`cri` is a native plugin of containerd 1.1 and above. It is built into containerd and enabled by default.
+`cri` is in GA:
 * It is feature complete.
-* It (the beta version) works with Kubernetes >= 1.9.
+* It (the GA version) works with Kubernetes 1.10 and above.
 * It has passed all [CRI validation tests](https://github.com/kubernetes/community/blob/master/contributors/devel/cri-validation.md).
-* It has passed all regular [node e2e tests](https://github.com/kubernetes/community/blob/master/contributors/devel/e2e-node-tests.md).
-* It has passed all regular [e2e tests](https://github.com/kubernetes/community/blob/master/contributors/devel/e2e-tests.md).
+* It has passed all [node e2e tests](https://github.com/kubernetes/community/blob/master/contributors/devel/e2e-node-tests.md).
+* It has passed all [e2e tests](https://github.com/kubernetes/community/blob/master/contributors/devel/e2e-tests.md).
 See [test dashboard](https://k8s-testgrid.appspot.com/sig-node-containerd)
 ## Support Metrics
-| CRI-Containerd Version | Kubernetes Version |
-|:----------------------:|:------------------:|
-| v1.0.0-alpha.x         | 1.7, 1.8           |
-| v1.0.0-beta.x          | 1.9                |
-| HEAD                   | 1.10+              |
+| CRI-Containerd Version | Containerd Version | Kubernetes Version | CRI Version |
+|:----------------------:|:------------------:|:------------------:|:-----------:|
+| v1.0.0-alpha.x         |                    | 1.7, 1.8           | v1alpha1    |
+| v1.0.0-beta.x          |                    | 1.9                | v1alpha1    |
+| End-Of-Life            | v1.1               | 1.10+              | v1alpha2    |
+|                        | HEAD               | 1.10+              | v1alpha2    |
 ## Production Quality Cluster on GCE
 For a production quality cluster on GCE brought up with `kube-up.sh` refer [here](docs/kube-up.md).
 ## Installing with Ansible and Kubeadm
@@ -35,33 +47,33 @@ For non ansible users, you can download the `cri-containerd` release tarball and
 kubernetes cluster using kubeadm as described [here](docs/installation.md).
 ## Getting Started for Developers
 ### Binary Dependencies and Specifications
-The current release of `cri-containerd` has the following dependencies:
+The current release of the `cri` plugin has the following dependencies:
 * [containerd](https://github.com/containerd/containerd)
 * [runc](https://github.com/opencontainers/runc)
 * [CNI](https://github.com/containernetworking/cni)
-See [versions](./vendor.conf) of these dependencies `cri-containerd` is tested with.
+See [versions](./vendor.conf) of these dependencies `cri` is tested with.
 As containerd and runc move to their respective general availability releases,
-we will do our best to rebase/retest `cri-containerd` with these releases on a
-weekly/monthly basis. Similarly, given that `cri-containerd` uses the Open
+we will do our best to rebase/retest `cri` with these releases on a
+weekly/monthly basis. Similarly, given that `cri` uses the Open
 Container Initiative (OCI) [image](https://github.com/opencontainers/image-spec)
 and [runtime](https://github.com/opencontainers/runtime-spec) specifications, we
-will also do our best to update `cri-containerd` to the latest releases of these
+will also do our best to update `cri` to the latest releases of these
 specifications as appropriate.
 ### Install Dependencies
 1. Install development libraries:
-* **libseccomp development library.** Required by cri-containerd and runc seccomp support. `libseccomp-dev` (Ubuntu, Debian) / `libseccomp-devel`
+* **libseccomp development library.** Required by `cri` and runc seccomp support. `libseccomp-dev` (Ubuntu, Debian) / `libseccomp-devel`
 (Fedora, CentOS, RHEL). On releases of Ubuntu <=Trusty and Debian <=jessie a
 backport version of `libseccomp-dev` is required. See [travis.yml](.travis.yml) for an example on trusty.
-* **libapparmor development library.** Required by cri-containerd and runc apparmor support. To use apparmor on Debian, Ubuntu, and related distributions the installation of `libapparmor-dev` is required.
+* **libapparmor development library.** Required by `cri` and runc apparmor support. To use apparmor on Debian, Ubuntu, and related distributions the installation of `libapparmor-dev` is required.
 * **btrfs development library.** Required by containerd btrfs support. `btrfs-tools`(Ubuntu, Debian) / `btrfs-progs-devel`(Fedora, CentOS, RHEL)
 2. Install other dependencies:
-* **`nsenter`**: Required by CNI and portforward.
+* **`nsenter`**: Required by portforward.
 * **`socat`**: Required by portforward.
 3. Install and setup a go 1.10 development environment.
 4. Make a local clone of this repository.
-5. Install binary dependencies by running the following command from your cloned `cri-containerd/` project directory:
+5. Install binary dependencies by running the following command from your cloned `cri/` project directory:
 ```bash
 # Note: install.deps installs the above mentioned runc, containerd, and CNI
 # binary dependencies. install.deps is only provided for general use and ease of
@@ -69,15 +81,18 @@ backport version of `libseccomp-dev` is required. See [travis.yml](.travis.yml)
 # `cni`, please follow instructions in their documents.
 make install.deps
 ```
-### Build and Install cri-containerd
-To build and install `cri-containerd` enter the following commands from your `cri-containerd` project directory:
+### Build and Install `cri`
+To build and install a version of containerd with the `cri` plugin, enter the
+following commands from your `cri` project directory:
 ```bash
 make
 sudo make install
 ```
+*NOTE: The version of containerd built and installed from the `Makefile` is only for
+testing purposes. The version tag carries the suffix "-TEST".*
 #### Build Tags
-`cri-containerd` supports optional build tags for compiling support of various features.
-To add build tags to the make option the `BUILDTAGS` variable must be set.
+`cri` supports optional build tags for compiling support of various features.
+To add build tags to the make option the `BUILD_TAGS` variable must be set.
 ```bash
 make BUILD_TAGS='seccomp apparmor'
@@ -88,31 +103,28 @@ make BUILD_TAGS='seccomp apparmor'
 | seccomp | syscall filtering | libseccomp development library |
 | selinux | selinux process and mount labeling | <none> |
 | apparmor | apparmor profile support | libapparmor development library |
-### Validate Your cri-containerd Setup
+### Validate Your `cri` Setup
 A Kubernetes incubator project called [cri-tools](https://github.com/kubernetes-incubator/cri-tools)
-includes programs for exercising CRI implementations such as `cri-containerd`.
+includes programs for exercising CRI implementations such as the `cri` plugin.
 More importantly, cri-tools includes the program `critest` which is used for running
 [CRI Validation Testing](https://github.com/kubernetes/community/blob/master/contributors/devel/cri-validation.md).
-Run the CRI Validation test to validate your installation of `cri-containerd`:
+Run the CRI Validation test to validate your installation of `containerd` with `cri` built in:
 ```bash
 make test-cri
 ```
 ### Running a Kubernetes local cluster
 If you already have a working development environment for supported Kubernetes
-version, you can try `cri-containerd` in a local cluster:
-1. Start `containerd` as root in a first terminal:
+version, you can try `cri` in a local cluster:
+1. Start the version of `containerd` with `cri` plugin that you built and installed
+above as root in a first terminal:
 ```bash
 sudo containerd
 ```
-2. Start `cri-containerd` as root in a second terminal:
+2. From the Kubernetes project directory startup a local cluster using `containerd`:
 ```bash
-sudo cri-containerd
-```
-3. From the Kubernetes project directory startup a local cluster using `cri-containerd`:
-```bash
-CONTAINER_RUNTIME=remote CONTAINER_RUNTIME_ENDPOINT='/var/run/cri-containerd.sock' ./hack/local-up-cluster.sh
+CONTAINER_RUNTIME=remote CONTAINER_RUNTIME_ENDPOINT='/run/containerd/containerd.sock' ./hack/local-up-cluster.sh
 ```
 ### Test
 See [here](./docs/testing.md) for information about test.

View File

@@ -49,9 +49,9 @@ var loadCommand = cli.Command{
 		timeout = context.GlobalDuration("timeout")
 		cancel  gocontext.CancelFunc
 	)
-	cl, err := client.NewCRIContainerdClient(address, timeout)
+	cl, err := client.NewCRIPluginClient(address, timeout)
 	if err != nil {
-		return fmt.Errorf("failed to create grpc client: %v", err)
+		return errors.Wrap(err, "failed to create grpc client")
 	}
 	if timeout > 0 {
 		ctx, cancel = gocontext.WithTimeout(gocontext.Background(), timeout)
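The switch from `fmt.Errorf` to `github.com/pkg/errors` recurs throughout this commit. Below is a minimal, hedged sketch of why wrapped errors are preferable; the `loadConfig` helper and the path are illustrative, not part of this change.

```go
package main

import (
	"fmt"
	"os"

	"github.com/pkg/errors"
)

// loadConfig is an illustrative helper: errors.Wrap annotates the error while
// keeping the original error retrievable via errors.Cause, which a plain
// fmt.Errorf("...: %v", err) would flatten into a string.
func loadConfig(path string) error {
	if _, err := os.Stat(path); err != nil {
		return errors.Wrap(err, "failed to stat config")
	}
	return nil
}

func main() {
	if err := loadConfig("/nonexistent"); err != nil {
		fmt.Println(err)               // "failed to stat config: stat /nonexistent: ..."
		fmt.Println(errors.Cause(err)) // the underlying *os.PathError is still accessible
	}
}
```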

View File

@@ -64,12 +64,10 @@ func initCRIService(ic *plugin.InitContext) (interface{}, error) {
 	pluginConfig := ic.Config.(*criconfig.PluginConfig)
 	c := criconfig.Config{
 		PluginConfig:       *pluginConfig,
-		// This is a hack. We assume that containerd root directory
-		// is one level above plugin directory.
-		// TODO(random-liu): Expose containerd config to plugin.
 		ContainerdRootDir:  filepath.Dir(ic.Root),
 		ContainerdEndpoint: ic.Address,
 		RootDir:            ic.Root,
+		StateDir:           ic.State,
 	}
 	log.G(ctx).Infof("Start cri plugin with config %+v", c)
@@ -92,7 +90,7 @@ func initCRIService(ic *plugin.InitContext) (interface{}, error) {
 		return nil, errors.Wrap(err, "failed to create containerd client")
 	}
-	s, err := server.NewCRIContainerdService(c, client)
+	s, err := server.NewCRIService(c, client)
 	if err != nil {
 		return nil, errors.Wrap(err, "failed to create CRI service")
 	}

View File

@@ -31,4 +31,8 @@ const (
 	// SandboxID is the sandbox ID annotation
 	SandboxID = "io.kubernetes.cri.sandbox-id"
+	// UntrustedWorkload is the sandbox annotation for untrusted workload. Untrusted
+	// workload can only run on dedicated runtime for untrusted workload.
+	UntrustedWorkload = "io.kubernetes.cri.untrusted-workload"
 )
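For context, a hedged sketch of how this annotation is consumed: kubelet copies pod annotations into the CRI `PodSandboxConfig`, and the plugin checks for the key above to pick the untrusted-workload runtime. The sandbox metadata values below are made up for illustration.

```go
package example

import (
	runtime "k8s.io/kubernetes/pkg/kubelet/apis/cri/runtime/v1alpha2"
)

// untrustedSandboxConfig builds a sandbox config carrying the
// io.kubernetes.cri.untrusted-workload annotation; a cri plugin configured with
// an untrusted_workload_runtime would run this sandbox on that runtime.
func untrustedSandboxConfig() *runtime.PodSandboxConfig {
	return &runtime.PodSandboxConfig{
		Metadata: &runtime.PodSandboxMetadata{
			Name:      "untrusted-sandbox", // illustrative values
			Namespace: "default",
			Uid:       "example-uid",
		},
		Annotations: map[string]string{
			"io.kubernetes.cri.untrusted-workload": "true",
		},
	}
}
```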

View File

@@ -1,5 +1,5 @@
 /*
-Copyright 2017 The Kubernetes Authors.
+Copyright 2018 The containerd Authors.

 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
@@ -13,7 +13,6 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License.
 */

 // Code generated by protoc-gen-gogo.
 // source: api.proto
-// DO NOT EDIT!
@@ -101,66 +100,66 @@ var _ grpc.ClientConn
 // is compatible with the grpc package it is being compiled against.
 const _ = grpc.SupportPackageIsVersion4

-// Client API for CRIContainerdService service
+// Client API for CRIPluginService service

-type CRIContainerdServiceClient interface {
+type CRIPluginServiceClient interface {
 	// LoadImage loads a image into containerd.
 	LoadImage(ctx context.Context, in *LoadImageRequest, opts ...grpc.CallOption) (*LoadImageResponse, error)
 }

-type cRIContainerdServiceClient struct {
+type cRIPluginServiceClient struct {
 	cc *grpc.ClientConn
 }

-func NewCRIContainerdServiceClient(cc *grpc.ClientConn) CRIContainerdServiceClient {
-	return &cRIContainerdServiceClient{cc}
+func NewCRIPluginServiceClient(cc *grpc.ClientConn) CRIPluginServiceClient {
+	return &cRIPluginServiceClient{cc}
 }

-func (c *cRIContainerdServiceClient) LoadImage(ctx context.Context, in *LoadImageRequest, opts ...grpc.CallOption) (*LoadImageResponse, error) {
+func (c *cRIPluginServiceClient) LoadImage(ctx context.Context, in *LoadImageRequest, opts ...grpc.CallOption) (*LoadImageResponse, error) {
 	out := new(LoadImageResponse)
-	err := grpc.Invoke(ctx, "/api.v1.CRIContainerdService/LoadImage", in, out, c.cc, opts...)
+	err := grpc.Invoke(ctx, "/api.v1.CRIPluginService/LoadImage", in, out, c.cc, opts...)
 	if err != nil {
 		return nil, err
 	}
 	return out, nil
 }

-// Server API for CRIContainerdService service
+// Server API for CRIPluginService service

-type CRIContainerdServiceServer interface {
+type CRIPluginServiceServer interface {
 	// LoadImage loads a image into containerd.
 	LoadImage(context.Context, *LoadImageRequest) (*LoadImageResponse, error)
 }

-func RegisterCRIContainerdServiceServer(s *grpc.Server, srv CRIContainerdServiceServer) {
-	s.RegisterService(&_CRIContainerdService_serviceDesc, srv)
+func RegisterCRIPluginServiceServer(s *grpc.Server, srv CRIPluginServiceServer) {
+	s.RegisterService(&_CRIPluginService_serviceDesc, srv)
 }

-func _CRIContainerdService_LoadImage_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+func _CRIPluginService_LoadImage_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
 	in := new(LoadImageRequest)
 	if err := dec(in); err != nil {
 		return nil, err
 	}
 	if interceptor == nil {
-		return srv.(CRIContainerdServiceServer).LoadImage(ctx, in)
+		return srv.(CRIPluginServiceServer).LoadImage(ctx, in)
 	}
 	info := &grpc.UnaryServerInfo{
 		Server:     srv,
-		FullMethod: "/api.v1.CRIContainerdService/LoadImage",
+		FullMethod: "/api.v1.CRIPluginService/LoadImage",
 	}
 	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
-		return srv.(CRIContainerdServiceServer).LoadImage(ctx, req.(*LoadImageRequest))
+		return srv.(CRIPluginServiceServer).LoadImage(ctx, req.(*LoadImageRequest))
 	}
 	return interceptor(ctx, in, info, handler)
 }

-var _CRIContainerdService_serviceDesc = grpc.ServiceDesc{
-	ServiceName: "api.v1.CRIContainerdService",
-	HandlerType: (*CRIContainerdServiceServer)(nil),
+var _CRIPluginService_serviceDesc = grpc.ServiceDesc{
+	ServiceName: "api.v1.CRIPluginService",
+	HandlerType: (*CRIPluginServiceServer)(nil),
 	Methods: []grpc.MethodDesc{
 		{
 			MethodName: "LoadImage",
-			Handler:    _CRIContainerdService_LoadImage_Handler,
+			Handler:    _CRIPluginService_LoadImage_Handler,
 		},
 	},
 	Streams: []grpc.StreamDesc{},
@ -580,7 +579,7 @@ var (
func init() { proto.RegisterFile("api.proto", fileDescriptorApi) } func init() { proto.RegisterFile("api.proto", fileDescriptorApi) }
var fileDescriptorApi = []byte{ var fileDescriptorApi = []byte{
// 223 bytes of a gzipped FileDescriptorProto // 219 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0xe2, 0x4c, 0x2c, 0xc8, 0xd4, 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0xe2, 0x4c, 0x2c, 0xc8, 0xd4,
0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0x62, 0x03, 0x31, 0xcb, 0x0c, 0xa5, 0x74, 0xd3, 0x33, 0x4b, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0x62, 0x03, 0x31, 0xcb, 0x0c, 0xa5, 0x74, 0xd3, 0x33, 0x4b,
0x32, 0x4a, 0x93, 0xf4, 0x92, 0xf3, 0x73, 0xf5, 0xd3, 0xf3, 0xd3, 0xf3, 0xf5, 0xc1, 0xd2, 0x49, 0x32, 0x4a, 0x93, 0xf4, 0x92, 0xf3, 0x73, 0xf5, 0xd3, 0xf3, 0xd3, 0xf3, 0xf5, 0xc1, 0xd2, 0x49,
@ -588,11 +587,11 @@ var fileDescriptorApi = []byte{
0xa6, 0x78, 0xe6, 0x26, 0xa6, 0xa7, 0x06, 0xa5, 0x16, 0x96, 0xa6, 0x16, 0x97, 0x08, 0x49, 0x71, 0xa6, 0x78, 0xe6, 0x26, 0xa6, 0xa7, 0x06, 0xa5, 0x16, 0x96, 0xa6, 0x16, 0x97, 0x08, 0x49, 0x71,
0x71, 0xb8, 0x65, 0xe6, 0xa4, 0x06, 0x24, 0x96, 0x64, 0x48, 0x30, 0x2a, 0x30, 0x6a, 0x70, 0x06, 0x71, 0xb8, 0x65, 0xe6, 0xa4, 0x06, 0x24, 0x96, 0x64, 0x48, 0x30, 0x2a, 0x30, 0x6a, 0x70, 0x06,
0xc1, 0xf9, 0x4a, 0xda, 0x5c, 0x82, 0x48, 0xea, 0x8b, 0x0b, 0xf2, 0xf3, 0x8a, 0x53, 0x85, 0xc4, 0xc1, 0xf9, 0x4a, 0xda, 0x5c, 0x82, 0x48, 0xea, 0x8b, 0x0b, 0xf2, 0xf3, 0x8a, 0x53, 0x85, 0xc4,
0xb8, 0xd8, 0xc0, 0x02, 0xc5, 0x12, 0x8c, 0x0a, 0xcc, 0x1a, 0x9c, 0x41, 0x50, 0x9e, 0x51, 0x14, 0xb8, 0xd8, 0xc0, 0x02, 0xc5, 0x12, 0x8c, 0x0a, 0xcc, 0x1a, 0x9c, 0x41, 0x50, 0x9e, 0x51, 0x18,
0x97, 0x88, 0x73, 0x90, 0xa7, 0x73, 0x7e, 0x5e, 0x49, 0x62, 0x66, 0x5e, 0x6a, 0x51, 0x4a, 0x70, 0x97, 0x80, 0x73, 0x90, 0x67, 0x40, 0x4e, 0x69, 0x7a, 0x66, 0x5e, 0x70, 0x6a, 0x51, 0x59, 0x66,
0x6a, 0x51, 0x59, 0x66, 0x72, 0xaa, 0x90, 0x13, 0x17, 0x27, 0xdc, 0x10, 0x21, 0x09, 0x3d, 0x88, 0x72, 0xaa, 0x90, 0x13, 0x17, 0x27, 0xdc, 0x00, 0x21, 0x09, 0x3d, 0x88, 0xab, 0xf5, 0xd0, 0xdd,
0xcb, 0xf5, 0xd0, 0xdd, 0x21, 0x25, 0x89, 0x45, 0x06, 0x62, 0xa3, 0x12, 0x83, 0x93, 0xcc, 0x89, 0x20, 0x25, 0x89, 0x45, 0x06, 0x62, 0x9b, 0x12, 0x83, 0x93, 0xcc, 0x89, 0x87, 0x72, 0x8c, 0x37,
0x87, 0x72, 0x8c, 0x37, 0x1e, 0xca, 0x31, 0x34, 0x3c, 0x92, 0x63, 0x3c, 0xf1, 0x48, 0x8e, 0xf1, 0x1e, 0xca, 0x31, 0x34, 0x3c, 0x92, 0x63, 0x3c, 0xf1, 0x48, 0x8e, 0xf1, 0xc2, 0x23, 0x39, 0xc6,
0xc2, 0x23, 0x39, 0xc6, 0x07, 0x8f, 0xe4, 0x18, 0x27, 0x3c, 0x96, 0x63, 0x48, 0x62, 0x03, 0xfb, 0x07, 0x8f, 0xe4, 0x18, 0x27, 0x3c, 0x96, 0x63, 0x48, 0x62, 0x03, 0xfb, 0xcc, 0x18, 0x10, 0x00,
0xce, 0x18, 0x10, 0x00, 0x00, 0xff, 0xff, 0x6a, 0xfe, 0x35, 0x81, 0x21, 0x01, 0x00, 0x00, 0x00, 0xff, 0xff, 0xfc, 0x6f, 0xec, 0xf4, 0x1d, 0x01, 0x00, 0x00,
} }

View File

@@ -13,8 +13,8 @@ option (gogoproto.sizer_all) = true;
 option (gogoproto.unmarshaler_all) = true;
 option (gogoproto.goproto_unrecognized_all) = false;

-// CRIContainerdService defines non-CRI APIs for cri-containerd.
-service CRIContainerdService{
+// CRIPluginService defines non-CRI APIs for cri plugin.
+service CRIPluginService{
 	// LoadImage loads a image into containerd.
 	rpc LoadImage(LoadImageRequest) returns (LoadImageResponse) {}
 }

View File

@@ -17,21 +17,21 @@ limitations under the License.
 package client

 import (
-	"fmt"
 	"time"

+	"github.com/pkg/errors"
 	"google.golang.org/grpc"
 	"k8s.io/kubernetes/pkg/kubelet/util"

 	api "github.com/containerd/cri/pkg/api/v1"
 )

-// NewCRIContainerdClient creates grpc client of cri-containerd
+// NewCRIPluginClient creates grpc client of cri plugin
 // TODO(random-liu): Wrap grpc functions.
-func NewCRIContainerdClient(endpoint string, timeout time.Duration) (api.CRIContainerdServiceClient, error) {
+func NewCRIPluginClient(endpoint string, timeout time.Duration) (api.CRIPluginServiceClient, error) {
 	addr, dialer, err := util.GetAddressAndDialer(endpoint)
 	if err != nil {
-		return nil, fmt.Errorf("failed to get dialer: %v", err)
+		return nil, errors.Wrap(err, "failed to get dialer")
 	}
 	conn, err := grpc.Dial(addr,
 		grpc.WithBlock(),
@@ -41,7 +41,7 @@ func NewCRIContainerdClient(endpoint string, timeout time.Duration) (api.CRICont
 		grpc.WithDialer(dialer),
 	)
 	if err != nil {
-		return nil, fmt.Errorf("failed to dial: %v", err)
+		return nil, errors.Wrap(err, "failed to dial")
 	}
-	return api.NewCRIContainerdServiceClient(conn), nil
+	return api.NewCRIPluginServiceClient(conn), nil
 }
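A hedged usage sketch of the renamed client (not part of the commit): the socket path, timeout, the `pkg/client` import path, and the `FilePath` field name are assumptions made for illustration.

```go
package example

import (
	"time"

	gocontext "golang.org/x/net/context"

	api "github.com/containerd/cri/pkg/api/v1"
	"github.com/containerd/cri/pkg/client"
)

// loadImageTarball dials the cri plugin endpoint exposed by containerd and
// calls the single non-CRI RPC, LoadImage, to import an image tarball.
func loadImageTarball(tarPath string) error {
	cl, err := client.NewCRIPluginClient("/run/containerd/containerd.sock", 10*time.Second)
	if err != nil {
		return err
	}
	ctx, cancel := gocontext.WithTimeout(gocontext.Background(), 10*time.Second)
	defer cancel()
	_, err = cl.LoadImage(ctx, &api.LoadImageRequest{FilePath: tarPath})
	return err
}
```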

View File

@@ -18,35 +18,40 @@ package config

 import "github.com/containerd/containerd"

+// Runtime struct to contain the type(ID), engine, and root variables for a default runtime
+// and a runtime for untrusted worload.
+type Runtime struct {
+	// Type is the runtime type to use in containerd e.g. io.containerd.runtime.v1.linux
+	Type string `toml:"runtime_type" json:"runtimeType"`
+	// Engine is the name of the runtime engine used by containerd.
+	Engine string `toml:"runtime_engine" json:"runtimeEngine"`
+	// Root is the directory used by containerd for runtime state.
+	Root string `toml:"runtime_root" json:"runtimeRoot"`
+}
+
 // ContainerdConfig contains toml config related to containerd
 type ContainerdConfig struct {
 	// Snapshotter is the snapshotter used by containerd.
-	Snapshotter string `toml:"snapshotter" json:"snapshotter,omitempty"`
-	// Runtime is the runtime to use in containerd. We may support
-	// other runtimes in the future.
-	Runtime string `toml:"runtime" json:"runtime,omitempty"`
-	// RuntimeEngine is the name of the runtime engine used by containerd.
-	// Containerd default should be "runc"
-	// We may support other runtime engines in the future.
-	RuntimeEngine string `toml:"runtime_engine" json:"runtimeEngine,omitempty"`
-	// RuntimeRoot is the directory used by containerd for runtime state.
-	// Containerd default should be "/run/containerd/runc"
-	RuntimeRoot string `toml:"runtime_root" json:"runtimeRoot,omitempty"`
+	Snapshotter string `toml:"snapshotter" json:"snapshotter"`
+	// DefaultRuntime is the runtime to use in containerd.
+	DefaultRuntime Runtime `toml:"default_runtime" json:"defaultRuntime"`
+	// UntrustedWorkloadRuntime is a runtime to run untrusted workloads on it.
+	UntrustedWorkloadRuntime Runtime `toml:"untrusted_workload_runtime" json:"untrustedWorkloadRuntime"`
 }

 // CniConfig contains toml config related to cni
 type CniConfig struct {
 	// NetworkPluginBinDir is the directory in which the binaries for the plugin is kept.
-	NetworkPluginBinDir string `toml:"bin_dir" json:"binDir,omitempty"`
+	NetworkPluginBinDir string `toml:"bin_dir" json:"binDir"`
 	// NetworkPluginConfDir is the directory in which the admin places a CNI conf.
-	NetworkPluginConfDir string `toml:"conf_dir" json:"confDir,omitempty"`
+	NetworkPluginConfDir string `toml:"conf_dir" json:"confDir"`
 }

 // Mirror contains the config related to the registry mirror
 type Mirror struct {
 	// Endpoints are endpoints for a namespace. CRI plugin will try the endpoints
 	// one by one until a working one is found.
-	Endpoints []string `toml:"endpoint" json:"endpoint,omitempty"`
+	Endpoints []string `toml:"endpoint" json:"endpoint"`
 	// TODO (Abhi) We might need to add auth per namespace. Looks like
 	// image auth information is passed by kube itself.
 }
@@ -54,33 +59,30 @@ type Mirror struct {
 // Registry is registry settings configured
 type Registry struct {
 	// Mirrors are namespace to mirror mapping for all namespaces.
-	Mirrors map[string]Mirror `toml:"mirrors" json:"mirrors,omitempty"`
+	Mirrors map[string]Mirror `toml:"mirrors" json:"mirrors"`
 }

 // PluginConfig contains toml config related to CRI plugin,
 // it is a subset of Config.
 type PluginConfig struct {
 	// ContainerdConfig contains config related to containerd
-	ContainerdConfig `toml:"containerd" json:"containerd,omitempty"`
+	ContainerdConfig `toml:"containerd" json:"containerd"`
 	// CniConfig contains config related to cni
-	CniConfig `toml:"cni" json:"cni,omitempty"`
+	CniConfig `toml:"cni" json:"cni"`
 	// Registry contains config related to the registry
-	Registry `toml:"registry" json:"registry,omitempty"`
+	Registry `toml:"registry" json:"registry"`
 	// StreamServerAddress is the ip address streaming server is listening on.
-	StreamServerAddress string `toml:"stream_server_address" json:"streamServerAddress,omitempty"`
+	StreamServerAddress string `toml:"stream_server_address" json:"streamServerAddress"`
 	// StreamServerPort is the port streaming server is listening on.
-	StreamServerPort string `toml:"stream_server_port" json:"streamServerPort,omitempty"`
+	StreamServerPort string `toml:"stream_server_port" json:"streamServerPort"`
 	// EnableSelinux indicates to enable the selinux support.
-	EnableSelinux bool `toml:"enable_selinux" json:"enableSelinux,omitempty"`
+	EnableSelinux bool `toml:"enable_selinux" json:"enableSelinux"`
 	// SandboxImage is the image used by sandbox container.
-	SandboxImage string `toml:"sandbox_image" json:"sandboxImage,omitempty"`
+	SandboxImage string `toml:"sandbox_image" json:"sandboxImage"`
 	// StatsCollectPeriod is the period (in seconds) of snapshots stats collection.
-	StatsCollectPeriod int `toml:"stats_collect_period" json:"statsCollectPeriod,omitempty"`
+	StatsCollectPeriod int `toml:"stats_collect_period" json:"statsCollectPeriod"`
 	// SystemdCgroup enables systemd cgroup support.
-	SystemdCgroup bool `toml:"systemd_cgroup" json:"systemdCgroup,omitempty"`
-	// EnableIPv6DAD enables IPv6 DAD.
-	// TODO(random-liu): Use optimistic_dad when it's GA.
-	EnableIPv6DAD bool `toml:"enable_ipv6_dad" json:"enableIPv6DAD,omitempty"`
+	SystemdCgroup bool `toml:"systemd_cgroup" json:"systemdCgroup"`
 }

 // Config contains all configurations for cri server.
@@ -88,12 +90,14 @@ type Config struct {
 	// PluginConfig is the config for CRI plugin.
 	PluginConfig
 	// ContainerdRootDir is the root directory path for containerd.
-	ContainerdRootDir string `json:"containerdRootDir,omitempty"`
+	ContainerdRootDir string `json:"containerdRootDir"`
 	// ContainerdEndpoint is the containerd endpoint path.
-	ContainerdEndpoint string `json:"containerdEndpoint,omitempty"`
+	ContainerdEndpoint string `json:"containerdEndpoint"`
-	// RootDir is the root directory path for managing cri-containerd files
+	// RootDir is the root directory path for managing cri plugin files
 	// (metadata checkpoint etc.)
-	RootDir string `json:"rootDir,omitempty"`
+	RootDir string `json:"rootDir"`
+	// StateDir is the root directory path for managing volatile pod/container data
+	StateDir string `json:"stateDir"`
 }

 // DefaultConfig returns default configurations of cri plugin.
@@ -105,17 +109,18 @@ func DefaultConfig() PluginConfig {
 		},
 		ContainerdConfig: ContainerdConfig{
 			Snapshotter: containerd.DefaultSnapshotter,
-			Runtime:       "io.containerd.runtime.v1.linux",
-			RuntimeEngine: "",
-			RuntimeRoot:   "",
+			DefaultRuntime: Runtime{
+				Type:   "io.containerd.runtime.v1.linux",
+				Engine: "",
+				Root:   "",
+			},
 		},
 		StreamServerAddress: "",
 		StreamServerPort:    "10010",
 		EnableSelinux:       false,
-		SandboxImage:        "gcr.io/google_containers/pause:3.0",
+		SandboxImage:        "k8s.gcr.io/pause:3.1",
 		StatsCollectPeriod:  10,
 		SystemdCgroup:       false,
-		EnableIPv6DAD:       false,
 		Registry: Registry{
 			Mirrors: map[string]Mirror{
 				"docker.io": {

View File

@@ -30,6 +30,7 @@ import (
 	"github.com/containerd/containerd/content"
 	"github.com/containerd/containerd/errdefs"
 	"github.com/containerd/containerd/images"
+	"github.com/containerd/containerd/leases"
 	"github.com/containerd/containerd/log"
 	"github.com/opencontainers/go-digest"
 	"github.com/opencontainers/image-spec/specs-go"
@@ -81,8 +82,15 @@ func Import(ctx context.Context, client *containerd.Client, reader io.Reader) (_
 	if err != nil {
 		return nil, err
 	}
-	// TODO(random-liu): Fix this after containerd client is fixed (containerd/containerd#2193)
-	defer done(ctx) // nolint: errcheck
+	defer func() {
+		deferCtx, deferCancel := ctrdutil.DeferContext()
+		defer deferCancel()
+		if err := done(deferCtx); err != nil {
+			// Get lease id from context still works after context is done.
+			leaseID, _ := leases.Lease(ctx)
+			log.G(ctx).WithError(err).Errorf("Failed to release lease %q", leaseID)
+		}
+	}()
 	cs := client.ContentStore()
 	is := client.ImageService()
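A hedged illustration of the cleanup pattern introduced above: the deferred lease release gets its own context so it still runs after the request context is cancelled. `ctrdutil.DeferContext` is approximated here with a plain timeout context, and all names are illustrative.

```go
package example

import (
	"context"
	"time"

	"github.com/containerd/containerd/log"
)

// releaseOnReturn runs work under a lease and always calls done with a fresh,
// short-lived context, so cancellation of the caller's ctx cannot leak the lease.
func releaseOnReturn(ctx context.Context, done func(context.Context) error, work func(context.Context) error) error {
	defer func() {
		deferCtx, deferCancel := context.WithTimeout(context.Background(), 10*time.Second)
		defer deferCancel()
		if err := done(deferCtx); err != nil {
			log.G(ctx).WithError(err).Error("Failed to release lease")
		}
	}()
	return work(ctx)
}
```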

View File

@@ -271,7 +271,7 @@ func (r *containerdResolver) base(refspec reference.Spec) (*dockerBase, error) {
 	if urls, ok := r.registry[host]; ok {
 		urls, err := r.getV2Urls(urls, prefix)
 		if err != nil {
-			return nil, fmt.Errorf("failed to fetch v2 urls: %v", err)
+			return nil, errors.Wrap(err, "failed to fetch v2 urls")
 		}
 		base = append(base, urls...)
 	} else if host == "docker.io" {
@@ -434,7 +434,7 @@ func (r *dockerBase) setTokenAuth(ctx context.Context, params map[string]string)
 	realmURL, err := url.Parse(realm)
 	if err != nil {
-		return fmt.Errorf("invalid token auth challenge realm: %s", err)
+		return errors.Wrap(err, "invalid token auth challenge realm")
 	}
 	to := tokenOptions{
@@ -444,7 +444,7 @@ func (r *dockerBase) setTokenAuth(ctx context.Context, params map[string]string)
 	to.scopes = getTokenScopes(ctx, params)
 	if len(to.scopes) == 0 {
-		return errors.Errorf("no scope specified for token auth challenge")
+		return errors.New("no scope specified for token auth challenge")
 	}
 	if r.secret != "" {
 		// Credential information is provided, use oauth POST endpoint
@@ -517,7 +517,7 @@ func (r *dockerBase) fetchTokenWithOAuth(ctx context.Context, to tokenOptions) (
 	var tr postTokenResponse
 	if err = decoder.Decode(&tr); err != nil {
-		return "", fmt.Errorf("unable to decode token response: %s", err)
+		return "", errors.Wrap(err, "unable to decode token response")
 	}
 	return tr.AccessToken, nil
@@ -569,7 +569,7 @@ func (r *dockerBase) getToken(ctx context.Context, to tokenOptions) (string, err
 	var tr getTokenResponse
 	if err = decoder.Decode(&tr); err != nil {
-		return "", fmt.Errorf("unable to decode token response: %s", err)
+		return "", errors.Wrap(err, "unable to decode token response")
 	}
 	// `access_token` is equivalent to `token` and if both are specified
@@ -591,7 +591,7 @@ func (r *containerdResolver) getV2Urls(urls []string, imagePath string) ([]url.U
 	for _, u := range urls {
 		v2Url, err := url.Parse(u)
 		if err != nil {
-			return nil, fmt.Errorf("Failed to parse url during getv2 urls: %+v, err:%s", u, err)
+			return nil, errors.Wrapf(err, "failed to parse url during getv2 urls: %+v", u)
 		}
 		v2Url.Path = path.Join("/v2", imagePath)
 		v2Urls = append(v2Urls, *v2Url)

View File

@@ -25,6 +25,7 @@ import (
 	containerdmount "github.com/containerd/containerd/mount"
 	"github.com/containerd/fifo"
 	"github.com/docker/docker/pkg/mount"
+	"github.com/docker/docker/pkg/symlink"
 	"golang.org/x/net/context"
 	"golang.org/x/sys/unix"
 )
@@ -37,6 +38,7 @@ type OS interface {
 	OpenFifo(ctx context.Context, fn string, flag int, perm os.FileMode) (io.ReadWriteCloser, error)
 	Stat(name string) (os.FileInfo, error)
 	ResolveSymbolicLink(name string) (string, error)
+	FollowSymlinkInScope(path, scope string) (string, error)
 	CopyFile(src, dest string, perm os.FileMode) error
 	WriteFile(filename string, data []byte, perm os.FileMode) error
 	Mount(source string, target string, fstype string, flags uintptr, data string) error
@@ -47,7 +49,7 @@ type OS interface {
 // RealOS is used to dispatch the real system level operations.
 type RealOS struct{}

-// MkdirAll will will call os.MkdirAll to create a directory.
+// MkdirAll will call os.MkdirAll to create a directory.
 func (RealOS) MkdirAll(path string, perm os.FileMode) error {
 	return os.MkdirAll(path, perm)
 }
@@ -79,7 +81,12 @@ func (RealOS) ResolveSymbolicLink(path string) (string, error) {
 	return filepath.EvalSymlinks(path)
 }

-// CopyFile copys src file to dest file
+// FollowSymlinkInScope will call symlink.FollowSymlinkInScope.
+func (RealOS) FollowSymlinkInScope(path, scope string) (string, error) {
+	return symlink.FollowSymlinkInScope(path, scope)
+}
+
+// CopyFile will copy src file to dest file
 func (RealOS) CopyFile(src, dest string, perm os.FileMode) error {
 	in, err := os.Open(src)
 	if err != nil {
@@ -107,17 +114,21 @@ func (RealOS) Mount(source string, target string, fstype string, flags uintptr,
 	return unix.Mount(source, target, fstype, flags, data)
 }

-// Unmount will call unix.Unmount to unmount the file. The function doesn't
-// return error if target is not mounted.
+// Unmount will call Unmount to unmount the file.
 func (RealOS) Unmount(target string, flags int) error {
-	// TODO(random-liu): Follow symlink to make sure the result is correct.
-	if mounted, err := mount.Mounted(target); err != nil || !mounted {
-		return err
-	}
-	return unix.Unmount(target, flags)
+	return Unmount(target, flags)
 }

 // LookupMount gets mount info of a given path.
 func (RealOS) LookupMount(path string) (containerdmount.Info, error) {
 	return containerdmount.Lookup(path)
 }
+
+// Unmount will call unix.Unmount to unmount the file. The function doesn't
+// return error if target is not mounted.
+func Unmount(target string, flags int) error {
+	if mounted, err := mount.Mounted(target); err != nil || !mounted {
+		return err
+	}
+	return unix.Unmount(target, flags)
+}
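A hedged example of the difference between the two symlink helpers now on the `OS` interface: `FollowSymlinkInScope` confines resolution to a scope directory (for example a container rootfs), whereas `EvalSymlinks` resolves against the host root. The rootfs path and helper name below are made up for illustration.

```go
package example

import (
	"fmt"

	"github.com/docker/docker/pkg/symlink"
)

// resolveInRootfs resolves containerPath without ever escaping rootfs: a link
// pointing at "/etc/real.conf" resolves to rootfs+"/etc/real.conf", not the
// host's /etc/real.conf.
func resolveInRootfs(rootfs, containerPath string) (string, error) {
	resolved, err := symlink.FollowSymlinkInScope(containerPath, rootfs)
	if err != nil {
		return "", fmt.Errorf("failed to resolve %q in %q: %v", containerPath, rootfs, err)
	}
	return resolved, nil
}
```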

View File

@@ -17,8 +17,9 @@ limitations under the License.
 package registrar

 import (
-	"fmt"
 	"sync"

+	"github.com/pkg/errors"
 )

 // Registrar stores one-to-one name<->key mappings.
@@ -49,19 +50,19 @@ func (r *Registrar) Reserve(name, key string) error {
 	defer r.lock.Unlock()

 	if name == "" || key == "" {
-		return fmt.Errorf("invalid name %q or key %q", name, key)
+		return errors.Errorf("invalid name %q or key %q", name, key)
 	}

 	if k, exists := r.nameToKey[name]; exists {
 		if k != key {
-			return fmt.Errorf("name %q is reserved for %q", name, k)
+			return errors.Errorf("name %q is reserved for %q", name, k)
 		}
 		return nil
 	}

 	if n, exists := r.keyToName[key]; exists {
 		if n != name {
-			return fmt.Errorf("key %q is reserved for %q", key, n)
+			return errors.Errorf("key %q is reserved for %q", key, n)
 		}
 		return nil
 	}

View File

@@ -17,10 +17,10 @@ limitations under the License.
 package server

 import (
-	"fmt"
 	"io"

 	"github.com/containerd/containerd"
+	"github.com/pkg/errors"
 	"github.com/sirupsen/logrus"
 	"golang.org/x/net/context"
 	"k8s.io/client-go/tools/remotecommand"
@@ -30,35 +30,35 @@ import (
 )

 // Attach prepares a streaming endpoint to attach to a running container, and returns the address.
-func (c *criContainerdService) Attach(ctx context.Context, r *runtime.AttachRequest) (*runtime.AttachResponse, error) {
+func (c *criService) Attach(ctx context.Context, r *runtime.AttachRequest) (*runtime.AttachResponse, error) {
 	cntr, err := c.containerStore.Get(r.GetContainerId())
 	if err != nil {
-		return nil, fmt.Errorf("failed to find container in store: %v", err)
+		return nil, errors.Wrap(err, "failed to find container in store")
 	}
 	state := cntr.Status.Get().State()
 	if state != runtime.ContainerState_CONTAINER_RUNNING {
-		return nil, fmt.Errorf("container is in %s state", criContainerStateToString(state))
+		return nil, errors.Errorf("container is in %s state", criContainerStateToString(state))
 	}
 	return c.streamServer.GetAttach(r)
 }

-func (c *criContainerdService) attachContainer(ctx context.Context, id string, stdin io.Reader, stdout, stderr io.WriteCloser,
+func (c *criService) attachContainer(ctx context.Context, id string, stdin io.Reader, stdout, stderr io.WriteCloser,
 	tty bool, resize <-chan remotecommand.TerminalSize) error {
 	// Get container from our container store.
 	cntr, err := c.containerStore.Get(id)
 	if err != nil {
-		return fmt.Errorf("failed to find container %q in store: %v", id, err)
+		return errors.Wrapf(err, "failed to find container %q in store", id)
 	}
 	id = cntr.ID

 	state := cntr.Status.Get().State()
 	if state != runtime.ContainerState_CONTAINER_RUNNING {
-		return fmt.Errorf("container is in %s state", criContainerStateToString(state))
+		return errors.Errorf("container is in %s state", criContainerStateToString(state))
 	}

 	task, err := cntr.Container.Task(ctx, nil)
 	if err != nil {
-		return fmt.Errorf("failed to load task: %v", err)
+		return errors.Wrap(err, "failed to load task")
 	}
 	handleResizing(resize, func(size remotecommand.TerminalSize) {
 		if err := task.Resize(ctx, uint32(size.Width), uint32(size.Height)); err != nil {

View File

@ -17,7 +17,6 @@ limitations under the License.
package server package server
import ( import (
"fmt"
"os" "os"
"path/filepath" "path/filepath"
"strings" "strings"
@ -38,6 +37,7 @@ import (
"github.com/opencontainers/runtime-tools/generate" "github.com/opencontainers/runtime-tools/generate"
"github.com/opencontainers/runtime-tools/validate" "github.com/opencontainers/runtime-tools/validate"
"github.com/opencontainers/selinux/go-selinux/label" "github.com/opencontainers/selinux/go-selinux/label"
"github.com/pkg/errors"
"github.com/sirupsen/logrus" "github.com/sirupsen/logrus"
"github.com/syndtr/gocapability/capability" "github.com/syndtr/gocapability/capability"
"golang.org/x/net/context" "golang.org/x/net/context"
@ -73,17 +73,17 @@ func init() {
} }
// CreateContainer creates a new container in the given PodSandbox. // CreateContainer creates a new container in the given PodSandbox.
func (c *criContainerdService) CreateContainer(ctx context.Context, r *runtime.CreateContainerRequest) (_ *runtime.CreateContainerResponse, retErr error) { func (c *criService) CreateContainer(ctx context.Context, r *runtime.CreateContainerRequest) (_ *runtime.CreateContainerResponse, retErr error) {
config := r.GetConfig() config := r.GetConfig()
sandboxConfig := r.GetSandboxConfig() sandboxConfig := r.GetSandboxConfig()
sandbox, err := c.sandboxStore.Get(r.GetPodSandboxId()) sandbox, err := c.sandboxStore.Get(r.GetPodSandboxId())
if err != nil { if err != nil {
return nil, fmt.Errorf("failed to find sandbox id %q: %v", r.GetPodSandboxId(), err) return nil, errors.Wrapf(err, "failed to find sandbox id %q", r.GetPodSandboxId())
} }
sandboxID := sandbox.ID sandboxID := sandbox.ID
s, err := sandbox.Container.Task(ctx, nil) s, err := sandbox.Container.Task(ctx, nil)
if err != nil { if err != nil {
return nil, fmt.Errorf("failed to get sandbox container task: %v", err) return nil, errors.Wrap(err, "failed to get sandbox container task")
} }
sandboxPid := s.Pid() sandboxPid := s.Pid()
@ -94,7 +94,7 @@ func (c *criContainerdService) CreateContainer(ctx context.Context, r *runtime.C
name := makeContainerName(config.GetMetadata(), sandboxConfig.GetMetadata()) name := makeContainerName(config.GetMetadata(), sandboxConfig.GetMetadata())
logrus.Debugf("Generated id %q for container %q", id, name) logrus.Debugf("Generated id %q for container %q", id, name)
if err = c.containerNameIndex.Reserve(name, id); err != nil { if err = c.containerNameIndex.Reserve(name, id); err != nil {
return nil, fmt.Errorf("failed to reserve container name %q: %v", name, err) return nil, errors.Wrapf(err, "failed to reserve container name %q", name)
} }
defer func() { defer func() {
// Release the name if the function returns with an error. // Release the name if the function returns with an error.
@ -116,17 +116,28 @@ func (c *criContainerdService) CreateContainer(ctx context.Context, r *runtime.C
imageRef := config.GetImage().GetImage() imageRef := config.GetImage().GetImage()
image, err := c.localResolve(ctx, imageRef) image, err := c.localResolve(ctx, imageRef)
if err != nil { if err != nil {
return nil, fmt.Errorf("failed to resolve image %q: %v", imageRef, err) return nil, errors.Wrapf(err, "failed to resolve image %q", imageRef)
} }
if image == nil { if image == nil {
return nil, fmt.Errorf("image %q not found", imageRef) return nil, errors.Errorf("image %q not found", imageRef)
} }
// Run container using the same runtime with sandbox.
sandboxInfo, err := sandbox.Container.Info(ctx)
if err != nil {
return nil, errors.Wrapf(err, "failed to get sandbox %q info", sandboxID)
}
ociRuntime, err := getRuntimeConfigFromContainerInfo(sandboxInfo)
if err != nil {
return nil, errors.Wrap(err, "failed to get OCI runtime")
}
logrus.Debugf("Use OCI %+v for container %q", ociRuntime, id)
// Create container root directory. // Create container root directory.
containerRootDir := getContainerRootDir(c.config.RootDir, id) containerRootDir := c.getContainerRootDir(id)
if err = c.os.MkdirAll(containerRootDir, 0755); err != nil { if err = c.os.MkdirAll(containerRootDir, 0755); err != nil {
return nil, fmt.Errorf("failed to create container root directory %q: %v", return nil, errors.Wrapf(err, "failed to create container root directory %q",
containerRootDir, err) containerRootDir)
} }
defer func() { defer func() {
if retErr != nil { if retErr != nil {
@ -137,16 +148,30 @@ func (c *criContainerdService) CreateContainer(ctx context.Context, r *runtime.C
} }
} }
}() }()
volatileContainerRootDir := c.getVolatileContainerRootDir(id)
if err = c.os.MkdirAll(volatileContainerRootDir, 0755); err != nil {
return nil, errors.Wrapf(err, "failed to create volatile container root directory %q",
volatileContainerRootDir)
}
defer func() {
if retErr != nil {
// Cleanup the volatile container root directory.
if err = c.os.RemoveAll(volatileContainerRootDir); err != nil {
logrus.WithError(err).Errorf("Failed to remove volatile container root directory %q",
volatileContainerRootDir)
}
}
}()
// Create container volumes mounts. // Create container volumes mounts.
volumeMounts := c.generateVolumeMounts(containerRootDir, config.GetMounts(), &image.ImageSpec.Config) volumeMounts := c.generateVolumeMounts(containerRootDir, config.GetMounts(), &image.ImageSpec.Config)
// Generate container runtime spec. // Generate container runtime spec.
mounts := c.generateContainerMounts(getSandboxRootDir(c.config.RootDir, sandboxID), config) mounts := c.generateContainerMounts(sandboxID, config)
spec, err := c.generateContainerSpec(id, sandboxID, sandboxPid, config, sandboxConfig, &image.ImageSpec.Config, append(mounts, volumeMounts...)) spec, err := c.generateContainerSpec(id, sandboxID, sandboxPid, config, sandboxConfig, &image.ImageSpec.Config, append(mounts, volumeMounts...))
if err != nil { if err != nil {
return nil, fmt.Errorf("failed to generate container %q spec: %v", id, err) return nil, errors.Wrapf(err, "failed to generate container %q spec", id)
} }
logrus.Debugf("Container %q spec: %#+v", id, spew.NewFormatter(spec)) logrus.Debugf("Container %q spec: %#+v", id, spew.NewFormatter(spec))
@ -177,9 +202,9 @@ func (c *criContainerdService) CreateContainer(ctx context.Context, r *runtime.C
} }
containerIO, err := cio.NewContainerIO(id, containerIO, err := cio.NewContainerIO(id,
cio.WithNewFIFOs(containerRootDir, config.GetTty(), config.GetStdin())) cio.WithNewFIFOs(volatileContainerRootDir, config.GetTty(), config.GetStdin()))
if err != nil { if err != nil {
return nil, fmt.Errorf("failed to create container io: %v", err) return nil, errors.Wrap(err, "failed to create container io")
} }
defer func() { defer func() {
if retErr != nil { if retErr != nil {
@ -206,7 +231,7 @@ func (c *criContainerdService) CreateContainer(ctx context.Context, r *runtime.C
securityContext.GetPrivileged(), securityContext.GetPrivileged(),
c.apparmorEnabled) c.apparmorEnabled)
if err != nil { if err != nil {
return nil, fmt.Errorf("failed to generate apparmor spec opts: %v", err) return nil, errors.Wrap(err, "failed to generate apparmor spec opts")
} }
if apparmorSpecOpts != nil { if apparmorSpecOpts != nil {
specOpts = append(specOpts, apparmorSpecOpts) specOpts = append(specOpts, apparmorSpecOpts)
@ -217,7 +242,7 @@ func (c *criContainerdService) CreateContainer(ctx context.Context, r *runtime.C
securityContext.GetPrivileged(), securityContext.GetPrivileged(),
c.seccompEnabled) c.seccompEnabled)
if err != nil { if err != nil {
return nil, fmt.Errorf("failed to generate seccomp spec opts: %v", err) return nil, errors.Wrap(err, "failed to generate seccomp spec opts")
} }
if seccompSpecOpts != nil { if seccompSpecOpts != nil {
specOpts = append(specOpts, seccompSpecOpts) specOpts = append(specOpts, seccompSpecOpts)
@ -227,16 +252,16 @@ func (c *criContainerdService) CreateContainer(ctx context.Context, r *runtime.C
opts = append(opts, opts = append(opts,
containerd.WithSpec(spec, specOpts...), containerd.WithSpec(spec, specOpts...),
containerd.WithRuntime( containerd.WithRuntime(
c.config.ContainerdConfig.Runtime, ociRuntime.Type,
&runctypes.RuncOptions{ &runctypes.RuncOptions{
Runtime: c.config.ContainerdConfig.RuntimeEngine, Runtime: ociRuntime.Engine,
RuntimeRoot: c.config.ContainerdConfig.RuntimeRoot, RuntimeRoot: ociRuntime.Root,
SystemdCgroup: c.config.SystemdCgroup}), // TODO (mikebrow): add CriuPath when we add support for pause SystemdCgroup: c.config.SystemdCgroup}), // TODO (mikebrow): add CriuPath when we add support for pause
containerd.WithContainerLabels(containerLabels), containerd.WithContainerLabels(containerLabels),
containerd.WithContainerExtension(containerMetadataExtension, &meta)) containerd.WithContainerExtension(containerMetadataExtension, &meta))
var cntr containerd.Container var cntr containerd.Container
if cntr, err = c.client.NewContainer(ctx, id, opts...); err != nil { if cntr, err = c.client.NewContainer(ctx, id, opts...); err != nil {
return nil, fmt.Errorf("failed to create containerd container: %v", err) return nil, errors.Wrap(err, "failed to create containerd container")
} }
defer func() { defer func() {
if retErr != nil { if retErr != nil {
@ -255,8 +280,7 @@ func (c *criContainerdService) CreateContainer(ctx context.Context, r *runtime.C
containerstore.WithContainerIO(containerIO), containerstore.WithContainerIO(containerIO),
) )
if err != nil { if err != nil {
return nil, fmt.Errorf("failed to create internal container object for %q: %v", return nil, errors.Wrapf(err, "failed to create internal container object for %q", id)
id, err)
} }
defer func() { defer func() {
if retErr != nil { if retErr != nil {
@ -269,13 +293,13 @@ func (c *criContainerdService) CreateContainer(ctx context.Context, r *runtime.C
// Add container into container store. // Add container into container store.
if err := c.containerStore.Add(container); err != nil { if err := c.containerStore.Add(container); err != nil {
return nil, fmt.Errorf("failed to add container %q into store: %v", id, err) return nil, errors.Wrapf(err, "failed to add container %q into store", id)
} }
return &runtime.CreateContainerResponse{ContainerId: id}, nil return &runtime.CreateContainerResponse{ContainerId: id}, nil
} }
func (c *criContainerdService) generateContainerSpec(id string, sandboxID string, sandboxPid uint32, config *runtime.ContainerConfig, func (c *criService) generateContainerSpec(id string, sandboxID string, sandboxPid uint32, config *runtime.ContainerConfig,
sandboxConfig *runtime.PodSandboxConfig, imageConfig *imagespec.ImageConfig, extraMounts []*runtime.Mount) (*runtimespec.Spec, error) { sandboxConfig *runtime.PodSandboxConfig, imageConfig *imagespec.ImageConfig, extraMounts []*runtime.Mount) (*runtimespec.Spec, error) {
// Creates a spec Generator with the default spec. // Creates a spec Generator with the default spec.
spec, err := defaultRuntimeSpec(id) spec, err := defaultRuntimeSpec(id)
@ -316,30 +340,30 @@ func (c *criContainerdService) generateContainerSpec(id string, sandboxID string
selinuxOpt := securityContext.GetSelinuxOptions() selinuxOpt := securityContext.GetSelinuxOptions()
processLabel, mountLabel, err := initSelinuxOpts(selinuxOpt) processLabel, mountLabel, err := initSelinuxOpts(selinuxOpt)
if err != nil { if err != nil {
return nil, fmt.Errorf("failed to init selinux options %+v: %v", securityContext.GetSelinuxOptions(), err) return nil, errors.Wrapf(err, "failed to init selinux options %+v", securityContext.GetSelinuxOptions())
} }
// Add extra mounts first so that CRI specified mounts can override. // Add extra mounts first so that CRI specified mounts can override.
mounts := append(extraMounts, config.GetMounts()...) mounts := append(extraMounts, config.GetMounts()...)
if err := c.addOCIBindMounts(&g, mounts, mountLabel); err != nil { if err := c.addOCIBindMounts(&g, mounts, mountLabel); err != nil {
return nil, fmt.Errorf("failed to set OCI bind mounts %+v: %v", mounts, err) return nil, errors.Wrapf(err, "failed to set OCI bind mounts %+v", mounts)
} }
if securityContext.GetPrivileged() { if securityContext.GetPrivileged() {
if !sandboxConfig.GetLinux().GetSecurityContext().GetPrivileged() { if !sandboxConfig.GetLinux().GetSecurityContext().GetPrivileged() {
return nil, fmt.Errorf("no privileged container allowed in sandbox") return nil, errors.New("no privileged container allowed in sandbox")
} }
if err := setOCIPrivileged(&g, config); err != nil { if err := setOCIPrivileged(&g, config); err != nil {
return nil, err return nil, err
} }
} else { // not privileged } else { // not privileged
if err := c.addOCIDevices(&g, config.GetDevices()); err != nil { if err := c.addOCIDevices(&g, config.GetDevices()); err != nil {
return nil, fmt.Errorf("failed to set devices mapping %+v: %v", config.GetDevices(), err) return nil, errors.Wrapf(err, "failed to set devices mapping %+v", config.GetDevices())
} }
if err := setOCICapabilities(&g, securityContext.GetCapabilities()); err != nil { if err := setOCICapabilities(&g, securityContext.GetCapabilities()); err != nil {
return nil, fmt.Errorf("failed to set capabilities %+v: %v", return nil, errors.Wrapf(err, "failed to set capabilities %+v",
securityContext.GetCapabilities(), err) securityContext.GetCapabilities())
} }
} }
@ -378,7 +402,7 @@ func (c *criContainerdService) generateContainerSpec(id string, sandboxID string
// generateVolumeMounts sets up image volumes for container. Rely on the removal of container // generateVolumeMounts sets up image volumes for container. Rely on the removal of container
// root directory to do cleanup. Note that image volume will be skipped, if there is criMounts // root directory to do cleanup. Note that image volume will be skipped, if there is criMounts
// specified with the same destination. // specified with the same destination.
func (c *criContainerdService) generateVolumeMounts(containerRootDir string, criMounts []*runtime.Mount, config *imagespec.ImageConfig) []*runtime.Mount { func (c *criService) generateVolumeMounts(containerRootDir string, criMounts []*runtime.Mount, config *imagespec.ImageConfig) []*runtime.Mount {
if len(config.Volumes) == 0 { if len(config.Volumes) == 0 {
return nil return nil
} }
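generateVolumeMounts backs every volume declared in the image config with a directory under the container root dir, unless a CRI mount already covers that destination. A simplified sketch of that selection; the host-path layout below is invented, since the plugin's real layout is not shown in this diff:

package main

import (
	"fmt"
	"path/filepath"
	"strings"
)

type mount struct {
	ContainerPath string
	HostPath      string
}

// volumeMounts skips image volumes that the CRI request already mounts and gives the
// rest a host directory derived from the in-container destination.
func volumeMounts(containerRootDir string, criMounts []mount, imageVolumes map[string]struct{}) []mount {
	var out []mount
	for dst := range imageVolumes {
		covered := false
		for _, m := range criMounts {
			if filepath.Clean(m.ContainerPath) == filepath.Clean(dst) {
				covered = true
				break
			}
		}
		if covered {
			continue
		}
		// e.g. /var/log/app -> <root>/volumes/var-log-app (illustrative naming only)
		name := strings.Replace(strings.TrimPrefix(dst, "/"), "/", "-", -1)
		out = append(out, mount{
			ContainerPath: dst,
			HostPath:      filepath.Join(containerRootDir, "volumes", name),
		})
	}
	return out
}

func main() {
	vols := map[string]struct{}{"/data": {}, "/var/log/app": {}}
	cri := []mount{{ContainerPath: "/data", HostPath: "/mnt/data"}}
	for _, m := range volumeMounts("/var/lib/demo/containers/abc", cri, vols) {
		fmt.Printf("%s -> %s\n", m.HostPath, m.ContainerPath)
	}
}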
@ -406,13 +430,13 @@ func (c *criContainerdService) generateVolumeMounts(containerRootDir string, cri
// generateContainerMounts sets up necessary container mounts including /dev/shm, /etc/hosts // generateContainerMounts sets up necessary container mounts including /dev/shm, /etc/hosts
// and /etc/resolv.conf. // and /etc/resolv.conf.
func (c *criContainerdService) generateContainerMounts(sandboxRootDir string, config *runtime.ContainerConfig) []*runtime.Mount { func (c *criService) generateContainerMounts(sandboxID string, config *runtime.ContainerConfig) []*runtime.Mount {
var mounts []*runtime.Mount var mounts []*runtime.Mount
securityContext := config.GetLinux().GetSecurityContext() securityContext := config.GetLinux().GetSecurityContext()
if !isInCRIMounts(etcHosts, config.GetMounts()) { if !isInCRIMounts(etcHosts, config.GetMounts()) {
mounts = append(mounts, &runtime.Mount{ mounts = append(mounts, &runtime.Mount{
ContainerPath: etcHosts, ContainerPath: etcHosts,
HostPath: getSandboxHosts(sandboxRootDir), HostPath: c.getSandboxHosts(sandboxID),
Readonly: securityContext.GetReadonlyRootfs(), Readonly: securityContext.GetReadonlyRootfs(),
}) })
} }
@ -422,13 +446,13 @@ func (c *criContainerdService) generateContainerMounts(sandboxRootDir string, co
if !isInCRIMounts(resolvConfPath, config.GetMounts()) { if !isInCRIMounts(resolvConfPath, config.GetMounts()) {
mounts = append(mounts, &runtime.Mount{ mounts = append(mounts, &runtime.Mount{
ContainerPath: resolvConfPath, ContainerPath: resolvConfPath,
HostPath: getResolvPath(sandboxRootDir), HostPath: c.getResolvPath(sandboxID),
Readonly: securityContext.GetReadonlyRootfs(), Readonly: securityContext.GetReadonlyRootfs(),
}) })
} }
if !isInCRIMounts(devShm, config.GetMounts()) { if !isInCRIMounts(devShm, config.GetMounts()) {
sandboxDevShm := getSandboxDevShm(sandboxRootDir) sandboxDevShm := c.getSandboxDevShm(sandboxID)
if securityContext.GetNamespaceOptions().GetIpc() == runtime.NamespaceMode_NODE { if securityContext.GetNamespaceOptions().GetIpc() == runtime.NamespaceMode_NODE {
sandboxDevShm = devShm sandboxDevShm = devShm
} }
@ -457,7 +481,7 @@ func setOCIProcessArgs(g *generate.Generator, config *runtime.ContainerConfig, i
} }
} }
if len(command) == 0 && len(args) == 0 { if len(command) == 0 && len(args) == 0 {
return fmt.Errorf("no command specified") return errors.New("no command specified")
} }
g.SetProcessArgs(append(command, args...)) g.SetProcessArgs(append(command, args...))
return nil return nil
@ -469,7 +493,7 @@ func addImageEnvs(g *generate.Generator, imageEnvs []string) error {
for _, e := range imageEnvs { for _, e := range imageEnvs {
kv := strings.SplitN(e, "=", 2) kv := strings.SplitN(e, "=", 2)
if len(kv) != 2 { if len(kv) != 2 {
return fmt.Errorf("invalid environment variable %q", e) return errors.Errorf("invalid environment variable %q", e)
} }
g.AddProcessEnv(kv[0], kv[1]) g.AddProcessEnv(kv[0], kv[1])
} }
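addImageEnvs relies on strings.SplitN with a limit of 2, so an '=' inside the value survives while an entry with no '=' at all is rejected. A tiny runnable illustration:

package main

import (
	"fmt"
	"strings"
)

func main() {
	// Image configs carry env entries as "KEY=VALUE" strings.
	envs := []string{"PATH=/usr/local/bin:/usr/bin", "OPTS=a=b,c=d", "BROKEN"}
	for _, e := range envs {
		kv := strings.SplitN(e, "=", 2)
		if len(kv) != 2 {
			fmt.Printf("invalid environment variable %q\n", e)
			continue
		}
		fmt.Printf("key=%s value=%s\n", kv[0], kv[1])
	}
}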
@ -481,7 +505,7 @@ func setOCIPrivileged(g *generate.Generator, config *runtime.ContainerConfig) er
g.SetupPrivileged(true) g.SetupPrivileged(true)
setOCIBindMountsPrivileged(g) setOCIBindMountsPrivileged(g)
if err := setOCIDevicesPrivileged(g); err != nil { if err := setOCIDevicesPrivileged(g); err != nil {
return fmt.Errorf("failed to set devices mapping %+v: %v", config.GetDevices(), err) return errors.Wrapf(err, "failed to set devices mapping %+v", config.GetDevices())
} }
return nil return nil
} }
@ -497,7 +521,7 @@ func clearReadOnly(m *runtimespec.Mount) {
} }
// addDevices set device mapping without privilege. // addDevices set device mapping without privilege.
func (c *criContainerdService) addOCIDevices(g *generate.Generator, devs []*runtime.Device) error { func (c *criService) addOCIDevices(g *generate.Generator, devs []*runtime.Device) error {
spec := g.Spec() spec := g.Spec()
for _, device := range devs { for _, device := range devs {
path, err := c.os.ResolveSymbolicLink(device.HostPath) path, err := c.os.ResolveSymbolicLink(device.HostPath)
@ -560,7 +584,7 @@ func setOCIDevicesPrivileged(g *generate.Generator) error {
} }
// addOCIBindMounts adds bind mounts. // addOCIBindMounts adds bind mounts.
func (c *criContainerdService) addOCIBindMounts(g *generate.Generator, mounts []*runtime.Mount, mountLabel string) error { func (c *criService) addOCIBindMounts(g *generate.Generator, mounts []*runtime.Mount, mountLabel string) error {
// Mount cgroup into the container as readonly, which inherits docker's behavior. // Mount cgroup into the container as readonly, which inherits docker's behavior.
g.AddCgroupsMount("ro") // nolint: errcheck g.AddCgroupsMount("ro") // nolint: errcheck
for _, mount := range mounts { for _, mount := range mounts {
@ -570,17 +594,17 @@ func (c *criContainerdService) addOCIBindMounts(g *generate.Generator, mounts []
// TODO(random-liu): Add CRI validation test for this case. // TODO(random-liu): Add CRI validation test for this case.
if _, err := c.os.Stat(src); err != nil { if _, err := c.os.Stat(src); err != nil {
if !os.IsNotExist(err) { if !os.IsNotExist(err) {
return fmt.Errorf("failed to stat %q: %v", src, err) return errors.Wrapf(err, "failed to stat %q", src)
} }
if err := c.os.MkdirAll(src, 0755); err != nil { if err := c.os.MkdirAll(src, 0755); err != nil {
return fmt.Errorf("failed to mkdir %q: %v", src, err) return errors.Wrapf(err, "failed to mkdir %q", src)
} }
} }
// TODO(random-liu): Add cri-containerd integration test or cri validation test // TODO(random-liu): Add cri-containerd integration test or cri validation test
// for this. // for this.
src, err := c.os.ResolveSymbolicLink(src) src, err := c.os.ResolveSymbolicLink(src)
if err != nil { if err != nil {
return fmt.Errorf("failed to resolve symlink %q: %v", src, err) return errors.Wrapf(err, "failed to resolve symlink %q", src)
} }
options := []string{"rbind"} options := []string{"rbind"}
@ -619,7 +643,7 @@ func (c *criContainerdService) addOCIBindMounts(g *generate.Generator, mounts []
if mount.GetSelinuxRelabel() { if mount.GetSelinuxRelabel() {
if err := label.Relabel(src, mountLabel, true); err != nil && err != unix.ENOTSUP { if err := label.Relabel(src, mountLabel, true); err != nil && err != unix.ENOTSUP {
return fmt.Errorf("relabel %q with %q failed: %v", src, mountLabel, err) return errors.Wrapf(err, "relabel %q with %q failed", src, mountLabel)
} }
} }
g.AddBindMount(src, dst, options) g.AddBindMount(src, dst, options)
@ -773,7 +797,7 @@ func generateSeccompSpecOpts(seccompProf string, privileged, seccompEnabled bool
} }
if !seccompEnabled { if !seccompEnabled {
if seccompProf != "" && seccompProf != unconfinedProfile { if seccompProf != "" && seccompProf != unconfinedProfile {
return nil, fmt.Errorf("seccomp is not supported") return nil, errors.New("seccomp is not supported")
} }
return nil, nil return nil, nil
} }
@ -787,7 +811,7 @@ func generateSeccompSpecOpts(seccompProf string, privileged, seccompEnabled bool
default: default:
// Require and Trim default profile name prefix // Require and Trim default profile name prefix
if !strings.HasPrefix(seccompProf, profileNamePrefix) { if !strings.HasPrefix(seccompProf, profileNamePrefix) {
return nil, fmt.Errorf("invalid seccomp profile %q", seccompProf) return nil, errors.Errorf("invalid seccomp profile %q", seccompProf)
} }
return seccomp.WithProfile(strings.TrimPrefix(seccompProf, profileNamePrefix)), nil return seccomp.WithProfile(strings.TrimPrefix(seccompProf, profileNamePrefix)), nil
} }
@ -799,7 +823,7 @@ func generateApparmorSpecOpts(apparmorProf string, privileged, apparmorEnabled b
// Should fail loudly if user try to specify apparmor profile // Should fail loudly if user try to specify apparmor profile
// but we don't support it. // but we don't support it.
if apparmorProf != "" && apparmorProf != unconfinedProfile { if apparmorProf != "" && apparmorProf != unconfinedProfile {
return nil, fmt.Errorf("apparmor is not supported") return nil, errors.New("apparmor is not supported")
} }
return nil, nil return nil, nil
} }
@ -819,7 +843,7 @@ func generateApparmorSpecOpts(apparmorProf string, privileged, apparmorEnabled b
default: default:
// Require and Trim default profile name prefix // Require and Trim default profile name prefix
if !strings.HasPrefix(apparmorProf, profileNamePrefix) { if !strings.HasPrefix(apparmorProf, profileNamePrefix) {
return nil, fmt.Errorf("invalid apparmor profile %q", apparmorProf) return nil, errors.Errorf("invalid apparmor profile %q", apparmorProf)
} }
return apparmor.WithProfile(strings.TrimPrefix(apparmorProf, profileNamePrefix)), nil return apparmor.WithProfile(strings.TrimPrefix(apparmorProf, profileNamePrefix)), nil
} }
@ -840,7 +864,7 @@ func ensureShared(path string, lookupMount func(string) (mount.Info, error)) err
} }
} }
return fmt.Errorf("path %q is mounted on %q but it is not a shared mount", path, mountInfo.Mountpoint) return errors.Errorf("path %q is mounted on %q but it is not a shared mount", path, mountInfo.Mountpoint)
} }
// Ensure mount point on which path is mounted, is either shared or slave. // Ensure mount point on which path is mounted, is either shared or slave.
@ -858,5 +882,5 @@ func ensureSharedOrSlave(path string, lookupMount func(string) (mount.Info, erro
return nil return nil
} }
} }
return fmt.Errorf("path %q is mounted on %q but it is not a shared or slave mount", path, mountInfo.Mountpoint) return errors.Errorf("path %q is mounted on %q but it is not a shared or slave mount", path, mountInfo.Mountpoint)
} }
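ensureShared and ensureSharedOrSlave look at the propagation flags of the mount backing a path. A rough sketch of the same check done directly against /proc/self/mountinfo; the diff itself goes through the vendored mount.Lookup helper, and this only works on Linux:

package main

import (
	"bufio"
	"fmt"
	"os"
	"strings"
)

// sharedOrSlave reports whether the optional fields for the given mount point mark it
// as shared ("shared:N") or as a slave ("master:N").
func sharedOrSlave(mountPoint string) (bool, error) {
	f, err := os.Open("/proc/self/mountinfo")
	if err != nil {
		return false, err
	}
	defer f.Close()

	s := bufio.NewScanner(f)
	for s.Scan() {
		fields := strings.Fields(s.Text())
		if len(fields) < 7 || fields[4] != mountPoint {
			continue
		}
		// Optional fields sit between the mount options and the "-" separator.
		for _, opt := range fields[6:] {
			if opt == "-" {
				break
			}
			if strings.HasPrefix(opt, "shared:") || strings.HasPrefix(opt, "master:") {
				return true, nil
			}
		}
		return false, nil
	}
	return false, s.Err()
}

func main() {
	ok, err := sharedOrSlave("/")
	fmt.Println(ok, err)
}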
@ -17,21 +17,20 @@ limitations under the License.
package server package server
import ( import (
"fmt" "github.com/pkg/errors"
"golang.org/x/net/context" "golang.org/x/net/context"
runtime "k8s.io/kubernetes/pkg/kubelet/apis/cri/runtime/v1alpha2" runtime "k8s.io/kubernetes/pkg/kubelet/apis/cri/runtime/v1alpha2"
) )
// Exec prepares a streaming endpoint to execute a command in the container, and returns the address. // Exec prepares a streaming endpoint to execute a command in the container, and returns the address.
func (c *criContainerdService) Exec(ctx context.Context, r *runtime.ExecRequest) (*runtime.ExecResponse, error) { func (c *criService) Exec(ctx context.Context, r *runtime.ExecRequest) (*runtime.ExecResponse, error) {
cntr, err := c.containerStore.Get(r.GetContainerId()) cntr, err := c.containerStore.Get(r.GetContainerId())
if err != nil { if err != nil {
return nil, fmt.Errorf("failed to find container %q in store: %v", r.GetContainerId(), err) return nil, errors.Wrapf(err, "failed to find container %q in store", r.GetContainerId())
} }
state := cntr.Status.Get().State() state := cntr.Status.Get().State()
if state != runtime.ContainerState_CONTAINER_RUNNING { if state != runtime.ContainerState_CONTAINER_RUNNING {
return nil, fmt.Errorf("container is in %s state", criContainerStateToString(state)) return nil, errors.Errorf("container is in %s state", criContainerStateToString(state))
} }
return c.streamServer.GetExec(r) return c.streamServer.GetExec(r)
} }
@ -18,13 +18,13 @@ package server
import ( import (
"bytes" "bytes"
"fmt"
"io" "io"
"time" "time"
"github.com/containerd/containerd" "github.com/containerd/containerd"
containerdio "github.com/containerd/containerd/cio" containerdio "github.com/containerd/containerd/cio"
"github.com/containerd/containerd/errdefs" "github.com/containerd/containerd/errdefs"
"github.com/pkg/errors"
"github.com/sirupsen/logrus" "github.com/sirupsen/logrus"
"golang.org/x/net/context" "golang.org/x/net/context"
"golang.org/x/sys/unix" "golang.org/x/sys/unix"
@ -39,7 +39,7 @@ import (
// ExecSync executes a command in the container, and returns the stdout output. // ExecSync executes a command in the container, and returns the stdout output.
// If command exits with a non-zero exit code, an error is returned. // If command exits with a non-zero exit code, an error is returned.
func (c *criContainerdService) ExecSync(ctx context.Context, r *runtime.ExecSyncRequest) (*runtime.ExecSyncResponse, error) { func (c *criService) ExecSync(ctx context.Context, r *runtime.ExecSyncRequest) (*runtime.ExecSyncResponse, error) {
var stdout, stderr bytes.Buffer var stdout, stderr bytes.Buffer
exitCode, err := c.execInContainer(ctx, r.GetContainerId(), execOptions{ exitCode, err := c.execInContainer(ctx, r.GetContainerId(), execOptions{
cmd: r.GetCmd(), cmd: r.GetCmd(),
@ -48,7 +48,7 @@ func (c *criContainerdService) ExecSync(ctx context.Context, r *runtime.ExecSync
timeout: time.Duration(r.GetTimeout()) * time.Second, timeout: time.Duration(r.GetTimeout()) * time.Second,
}) })
if err != nil { if err != nil {
return nil, fmt.Errorf("failed to exec in container: %v", err) return nil, errors.Wrap(err, "failed to exec in container")
} }
return &runtime.ExecSyncResponse{ return &runtime.ExecSyncResponse{
@ -71,7 +71,7 @@ type execOptions struct {
// execInContainer executes a command inside the container synchronously, and // execInContainer executes a command inside the container synchronously, and
// redirects stdio stream properly. // redirects stdio stream properly.
func (c *criContainerdService) execInContainer(ctx context.Context, id string, opts execOptions) (*uint32, error) { func (c *criService) execInContainer(ctx context.Context, id string, opts execOptions) (*uint32, error) {
// Cancel the context before returning to ensure goroutines are stopped. // Cancel the context before returning to ensure goroutines are stopped.
// This is important, because if `Start` returns error, `Wait` will hang // This is important, because if `Start` returns error, `Wait` will hang
// forever unless we cancel the context. // forever unless we cancel the context.
@ -81,23 +81,23 @@ func (c *criContainerdService) execInContainer(ctx context.Context, id string, o
// Get container from our container store. // Get container from our container store.
cntr, err := c.containerStore.Get(id) cntr, err := c.containerStore.Get(id)
if err != nil { if err != nil {
return nil, fmt.Errorf("failed to find container %q in store: %v", id, err) return nil, errors.Wrapf(err, "failed to find container %q in store", id)
} }
id = cntr.ID id = cntr.ID
state := cntr.Status.Get().State() state := cntr.Status.Get().State()
if state != runtime.ContainerState_CONTAINER_RUNNING { if state != runtime.ContainerState_CONTAINER_RUNNING {
return nil, fmt.Errorf("container is in %s state", criContainerStateToString(state)) return nil, errors.Errorf("container is in %s state", criContainerStateToString(state))
} }
container := cntr.Container container := cntr.Container
spec, err := container.Spec(ctx) spec, err := container.Spec(ctx)
if err != nil { if err != nil {
return nil, fmt.Errorf("failed to get container spec: %v", err) return nil, errors.Wrap(err, "failed to get container spec")
} }
task, err := container.Task(ctx, nil) task, err := container.Task(ctx, nil)
if err != nil { if err != nil {
return nil, fmt.Errorf("failed to load task: %v", err) return nil, errors.Wrap(err, "failed to load task")
} }
if opts.tty { if opts.tty {
g := newSpecGenerator(spec) g := newSpecGenerator(spec)
@ -116,17 +116,17 @@ func (c *criContainerdService) execInContainer(ctx context.Context, id string, o
} }
execID := util.GenerateID() execID := util.GenerateID()
logrus.Debugf("Generated exec id %q for container %q", execID, id) logrus.Debugf("Generated exec id %q for container %q", execID, id)
rootDir := getContainerRootDir(c.config.RootDir, id) volatileRootDir := c.getVolatileContainerRootDir(id)
var execIO *cio.ExecIO var execIO *cio.ExecIO
process, err := task.Exec(ctx, execID, pspec, process, err := task.Exec(ctx, execID, pspec,
func(id string) (containerdio.IO, error) { func(id string) (containerdio.IO, error) {
var err error var err error
execIO, err = cio.NewExecIO(id, rootDir, opts.tty, opts.stdin != nil) execIO, err = cio.NewExecIO(id, volatileRootDir, opts.tty, opts.stdin != nil)
return execIO, err return execIO, err
}, },
) )
if err != nil { if err != nil {
return nil, fmt.Errorf("failed to create exec %q: %v", execID, err) return nil, errors.Wrapf(err, "failed to create exec %q", execID)
} }
defer func() { defer func() {
deferCtx, deferCancel := ctrdutil.DeferContext() deferCtx, deferCancel := ctrdutil.DeferContext()
@ -138,10 +138,10 @@ func (c *criContainerdService) execInContainer(ctx context.Context, id string, o
exitCh, err := process.Wait(ctx) exitCh, err := process.Wait(ctx)
if err != nil { if err != nil {
return nil, fmt.Errorf("failed to wait for process %q: %v", execID, err) return nil, errors.Wrapf(err, "failed to wait for process %q", execID)
} }
if err := process.Start(ctx); err != nil { if err := process.Start(ctx); err != nil {
return nil, fmt.Errorf("failed to start exec %q: %v", execID, err) return nil, errors.Wrapf(err, "failed to start exec %q", execID)
} }
handleResizing(opts.resize, func(size remotecommand.TerminalSize) { handleResizing(opts.resize, func(size remotecommand.TerminalSize) {
@ -173,7 +173,7 @@ func (c *criContainerdService) execInContainer(ctx context.Context, id string, o
//TODO(Abhi) Use context.WithDeadline instead of timeout. //TODO(Abhi) Use context.WithDeadline instead of timeout.
// Ignore the not found error because the process may exit itself before killing. // Ignore the not found error because the process may exit itself before killing.
if err := process.Kill(ctx, unix.SIGKILL); err != nil && !errdefs.IsNotFound(err) { if err := process.Kill(ctx, unix.SIGKILL); err != nil && !errdefs.IsNotFound(err) {
return nil, fmt.Errorf("failed to kill exec %q: %v", execID, err) return nil, errors.Wrapf(err, "failed to kill exec %q", execID)
} }
// Wait for the process to be killed. // Wait for the process to be killed.
exitRes := <-exitCh exitRes := <-exitCh
@ -181,12 +181,12 @@ func (c *criContainerdService) execInContainer(ctx context.Context, id string, o
execID, exitRes.ExitCode(), exitRes.Error()) execID, exitRes.ExitCode(), exitRes.Error())
<-attachDone <-attachDone
logrus.Debugf("Stream pipe for exec process %q done", execID) logrus.Debugf("Stream pipe for exec process %q done", execID)
return nil, fmt.Errorf("timeout %v exceeded", opts.timeout) return nil, errors.Errorf("timeout %v exceeded", opts.timeout)
case exitRes := <-exitCh: case exitRes := <-exitCh:
code, _, err := exitRes.Result() code, _, err := exitRes.Result()
logrus.Infof("Exec process %q exits with exit code %d and error %v", execID, code, err) logrus.Infof("Exec process %q exits with exit code %d and error %v", execID, code, err)
if err != nil { if err != nil {
return nil, fmt.Errorf("failed while waiting for exec %q: %v", execID, err) return nil, errors.Wrapf(err, "failed while waiting for exec %q", execID)
} }
<-attachDone <-attachDone
logrus.Debugf("Stream pipe for exec process %q done", execID) logrus.Debugf("Stream pipe for exec process %q done", execID)
@ -25,7 +25,7 @@ import (
) )
// ListContainers lists all containers matching the filter. // ListContainers lists all containers matching the filter.
func (c *criContainerdService) ListContainers(ctx context.Context, r *runtime.ListContainersRequest) (*runtime.ListContainersResponse, error) { func (c *criService) ListContainers(ctx context.Context, r *runtime.ListContainersRequest) (*runtime.ListContainersResponse, error) {
// List all containers from store. // List all containers from store.
containersInStore := c.containerStore.List() containersInStore := c.containerStore.List()
@ -54,7 +54,7 @@ func toCRIContainer(container containerstore.Container) *runtime.Container {
} }
} }
func (c *criContainerdService) normalizeContainerFilter(filter *runtime.ContainerFilter) { func (c *criService) normalizeContainerFilter(filter *runtime.ContainerFilter) {
if cntr, err := c.containerStore.Get(filter.GetId()); err == nil { if cntr, err := c.containerStore.Get(filter.GetId()); err == nil {
filter.Id = cntr.ID filter.Id = cntr.ID
} }
@ -64,7 +64,7 @@ func (c *criContainerdService) normalizeContainerFilter(filter *runtime.Containe
} }
// filterCRIContainers filters CRIContainers. // filterCRIContainers filters CRIContainers.
func (c *criContainerdService) filterCRIContainers(containers []*runtime.Container, filter *runtime.ContainerFilter) []*runtime.Container { func (c *criService) filterCRIContainers(containers []*runtime.Container, filter *runtime.ContainerFilter) []*runtime.Container {
if filter == nil { if filter == nil {
return containers return containers
} }
@ -17,7 +17,7 @@ limitations under the License.
package server package server
import ( import (
"fmt" "github.com/pkg/errors"
"golang.org/x/net/context" "golang.org/x/net/context"
runtime "k8s.io/kubernetes/pkg/kubelet/apis/cri/runtime/v1alpha2" runtime "k8s.io/kubernetes/pkg/kubelet/apis/cri/runtime/v1alpha2"
@ -25,14 +25,14 @@ import (
// ReopenContainerLog asks the cri plugin to reopen the stdout/stderr log file for the container. // ReopenContainerLog asks the cri plugin to reopen the stdout/stderr log file for the container.
// This is often called after the log file has been rotated. // This is often called after the log file has been rotated.
func (c *criContainerdService) ReopenContainerLog(ctx context.Context, r *runtime.ReopenContainerLogRequest) (*runtime.ReopenContainerLogResponse, error) { func (c *criService) ReopenContainerLog(ctx context.Context, r *runtime.ReopenContainerLogRequest) (*runtime.ReopenContainerLogResponse, error) {
container, err := c.containerStore.Get(r.GetContainerId()) container, err := c.containerStore.Get(r.GetContainerId())
if err != nil { if err != nil {
return nil, fmt.Errorf("an error occurred when try to find container %q: %v", r.GetContainerId(), err) return nil, errors.Wrapf(err, "an error occurred when try to find container %q", r.GetContainerId())
} }
if container.Status.Get().State() != runtime.ContainerState_CONTAINER_RUNNING { if container.Status.Get().State() != runtime.ContainerState_CONTAINER_RUNNING {
return nil, fmt.Errorf("container is not running") return nil, errors.New("container is not running")
} }
// Create new container logger and replace the existing ones. // Create new container logger and replace the existing ones.
@ -17,11 +17,10 @@ limitations under the License.
package server package server
import ( import (
"fmt"
"github.com/containerd/containerd" "github.com/containerd/containerd"
"github.com/containerd/containerd/errdefs" "github.com/containerd/containerd/errdefs"
"github.com/docker/docker/pkg/system" "github.com/docker/docker/pkg/system"
"github.com/pkg/errors"
"github.com/sirupsen/logrus" "github.com/sirupsen/logrus"
"golang.org/x/net/context" "golang.org/x/net/context"
runtime "k8s.io/kubernetes/pkg/kubelet/apis/cri/runtime/v1alpha2" runtime "k8s.io/kubernetes/pkg/kubelet/apis/cri/runtime/v1alpha2"
@ -33,11 +32,11 @@ import (
// RemoveContainer removes the container. // RemoveContainer removes the container.
// TODO(random-liu): Forcibly stop container if it's running. // TODO(random-liu): Forcibly stop container if it's running.
func (c *criContainerdService) RemoveContainer(ctx context.Context, r *runtime.RemoveContainerRequest) (_ *runtime.RemoveContainerResponse, retErr error) { func (c *criService) RemoveContainer(ctx context.Context, r *runtime.RemoveContainerRequest) (_ *runtime.RemoveContainerResponse, retErr error) {
container, err := c.containerStore.Get(r.GetContainerId()) container, err := c.containerStore.Get(r.GetContainerId())
if err != nil { if err != nil {
if err != store.ErrNotExist { if err != store.ErrNotExist {
return nil, fmt.Errorf("an error occurred when try to find container %q: %v", r.GetContainerId(), err) return nil, errors.Wrapf(err, "an error occurred when try to find container %q", r.GetContainerId())
} }
// Do not return error if container metadata doesn't exist. // Do not return error if container metadata doesn't exist.
log.Tracef("RemoveContainer called for container %q that does not exist", r.GetContainerId()) log.Tracef("RemoveContainer called for container %q that does not exist", r.GetContainerId())
@ -48,7 +47,7 @@ func (c *criContainerdService) RemoveContainer(ctx context.Context, r *runtime.R
// Set removing state to prevent other start/remove operations against this container // Set removing state to prevent other start/remove operations against this container
// while it's being removed. // while it's being removed.
if err := setContainerRemoving(container); err != nil { if err := setContainerRemoving(container); err != nil {
return nil, fmt.Errorf("failed to set removing state for container %q: %v", id, err) return nil, errors.Wrapf(err, "failed to set removing state for container %q", id)
} }
defer func() { defer func() {
if retErr != nil { if retErr != nil {
@ -67,20 +66,25 @@ func (c *criContainerdService) RemoveContainer(ctx context.Context, r *runtime.R
// Delete containerd container. // Delete containerd container.
if err := container.Container.Delete(ctx, containerd.WithSnapshotCleanup); err != nil { if err := container.Container.Delete(ctx, containerd.WithSnapshotCleanup); err != nil {
if !errdefs.IsNotFound(err) { if !errdefs.IsNotFound(err) {
return nil, fmt.Errorf("failed to delete containerd container %q: %v", id, err) return nil, errors.Wrapf(err, "failed to delete containerd container %q", id)
} }
log.Tracef("Remove called for containerd container %q that does not exist", id) log.Tracef("Remove called for containerd container %q that does not exist", id)
} }
// Delete container checkpoint. // Delete container checkpoint.
if err := container.Delete(); err != nil { if err := container.Delete(); err != nil {
return nil, fmt.Errorf("failed to delete container checkpoint for %q: %v", id, err) return nil, errors.Wrapf(err, "failed to delete container checkpoint for %q", id)
} }
containerRootDir := getContainerRootDir(c.config.RootDir, id) containerRootDir := c.getContainerRootDir(id)
if err := system.EnsureRemoveAll(containerRootDir); err != nil { if err := system.EnsureRemoveAll(containerRootDir); err != nil {
return nil, fmt.Errorf("failed to remove container root directory %q: %v", return nil, errors.Wrapf(err, "failed to remove container root directory %q",
containerRootDir, err) containerRootDir)
}
volatileContainerRootDir := c.getVolatileContainerRootDir(id)
if err := system.EnsureRemoveAll(volatileContainerRootDir); err != nil {
return nil, errors.Wrapf(err, "failed to remove volatile container root directory %q",
volatileContainerRootDir)
} }
c.containerStore.Delete(id) c.containerStore.Delete(id)
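RemoveContainer now cleans up two trees: the persistent container root directory and the new volatile one that holds reconstructible state such as the IO FIFOs. The concrete layout is internal to the plugin; the sketch below only illustrates the root-dir versus state-dir split with invented paths:

package main

import (
	"fmt"
	"path/filepath"
)

type dirs struct {
	RootDir  string // persists across reboots (e.g. container checkpoints)
	StateDir string // safe to lose (e.g. IO FIFOs)
}

func (d dirs) containerRootDir(id string) string {
	return filepath.Join(d.RootDir, "containers", id)
}

func (d dirs) volatileContainerRootDir(id string) string {
	return filepath.Join(d.StateDir, "containers", id)
}

func main() {
	d := dirs{RootDir: "/var/lib/cri-demo", StateDir: "/run/cri-demo"}
	id := "abc123"
	fmt.Println(d.containerRootDir(id))         // removed on RemoveContainer
	fmt.Println(d.volatileContainerRootDir(id)) // removed as well
}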
@ -96,10 +100,10 @@ func setContainerRemoving(container containerstore.Container) error {
return container.Status.Update(func(status containerstore.Status) (containerstore.Status, error) { return container.Status.Update(func(status containerstore.Status) (containerstore.Status, error) {
// Do not remove container if it's still running. // Do not remove container if it's still running.
if status.State() == runtime.ContainerState_CONTAINER_RUNNING { if status.State() == runtime.ContainerState_CONTAINER_RUNNING {
return status, fmt.Errorf("container is still running") return status, errors.New("container is still running")
} }
if status.Removing { if status.Removing {
return status, fmt.Errorf("container is already in removing state") return status, errors.New("container is already in removing state")
} }
status.Removing = true status.Removing = true
return status, nil return status, nil
@ -17,13 +17,13 @@ limitations under the License.
package server package server
import ( import (
"fmt"
"io" "io"
"time" "time"
"github.com/containerd/containerd" "github.com/containerd/containerd"
containerdio "github.com/containerd/containerd/cio" containerdio "github.com/containerd/containerd/cio"
"github.com/containerd/containerd/errdefs" "github.com/containerd/containerd/errdefs"
"github.com/pkg/errors"
"github.com/sirupsen/logrus" "github.com/sirupsen/logrus"
"golang.org/x/net/context" "golang.org/x/net/context"
runtime "k8s.io/kubernetes/pkg/kubelet/apis/cri/runtime/v1alpha2" runtime "k8s.io/kubernetes/pkg/kubelet/apis/cri/runtime/v1alpha2"
@ -35,10 +35,10 @@ import (
) )
// StartContainer starts the container. // StartContainer starts the container.
func (c *criContainerdService) StartContainer(ctx context.Context, r *runtime.StartContainerRequest) (retRes *runtime.StartContainerResponse, retErr error) { func (c *criService) StartContainer(ctx context.Context, r *runtime.StartContainerRequest) (retRes *runtime.StartContainerResponse, retErr error) {
container, err := c.containerStore.Get(r.GetContainerId()) container, err := c.containerStore.Get(r.GetContainerId())
if err != nil { if err != nil {
return nil, fmt.Errorf("an error occurred when try to find container %q: %v", r.GetContainerId(), err) return nil, errors.Wrapf(err, "an error occurred when try to find container %q", r.GetContainerId())
} }
var startErr error var startErr error
@ -51,14 +51,14 @@ func (c *criContainerdService) StartContainer(ctx context.Context, r *runtime.St
}); startErr != nil { }); startErr != nil {
return nil, startErr return nil, startErr
} else if err != nil { } else if err != nil {
return nil, fmt.Errorf("failed to update container %q metadata: %v", container.ID, err) return nil, errors.Wrapf(err, "failed to update container %q metadata", container.ID)
} }
return &runtime.StartContainerResponse{}, nil return &runtime.StartContainerResponse{}, nil
} }
// startContainer actually starts the container. The function needs to be run in one transaction. Any updates // startContainer actually starts the container. The function needs to be run in one transaction. Any updates
// to the status passed in will be applied no matter the function returns error or not. // to the status passed in will be applied no matter the function returns error or not.
func (c *criContainerdService) startContainer(ctx context.Context, func (c *criService) startContainer(ctx context.Context,
cntr containerstore.Container, cntr containerstore.Container,
status *containerstore.Status) (retErr error) { status *containerstore.Status) (retErr error) {
id := cntr.ID id := cntr.ID
@ -68,11 +68,11 @@ func (c *criContainerdService) startContainer(ctx context.Context,
// Return error if container is not in created state. // Return error if container is not in created state.
if status.State() != runtime.ContainerState_CONTAINER_CREATED { if status.State() != runtime.ContainerState_CONTAINER_CREATED {
return fmt.Errorf("container %q is in %s state", id, criContainerStateToString(status.State())) return errors.Errorf("container %q is in %s state", id, criContainerStateToString(status.State()))
} }
// Do not start the container when there is a removal in progress. // Do not start the container when there is a removal in progress.
if status.Removing { if status.Removing {
return fmt.Errorf("container %q is in removing state", id) return errors.Errorf("container %q is in removing state", id)
} }
defer func() { defer func() {
@ -89,17 +89,17 @@ func (c *criContainerdService) startContainer(ctx context.Context,
// Get sandbox config from sandbox store. // Get sandbox config from sandbox store.
sandbox, err := c.sandboxStore.Get(meta.SandboxID) sandbox, err := c.sandboxStore.Get(meta.SandboxID)
if err != nil { if err != nil {
return fmt.Errorf("sandbox %q not found: %v", meta.SandboxID, err) return errors.Wrapf(err, "sandbox %q not found", meta.SandboxID)
} }
sandboxID := meta.SandboxID sandboxID := meta.SandboxID
if sandbox.Status.Get().State != sandboxstore.StateReady { if sandbox.Status.Get().State != sandboxstore.StateReady {
return fmt.Errorf("sandbox container %q is not running", sandboxID) return errors.Errorf("sandbox container %q is not running", sandboxID)
} }
ioCreation := func(id string) (_ containerdio.IO, err error) { ioCreation := func(id string) (_ containerdio.IO, err error) {
stdoutWC, stderrWC, err := createContainerLoggers(meta.LogPath, config.GetTty()) stdoutWC, stderrWC, err := createContainerLoggers(meta.LogPath, config.GetTty())
if err != nil { if err != nil {
return nil, fmt.Errorf("failed to create container loggers: %v", err) return nil, errors.Wrap(err, "failed to create container loggers")
} }
defer func() { defer func() {
if err != nil { if err != nil {
@ -118,7 +118,7 @@ func (c *criContainerdService) startContainer(ctx context.Context,
task, err := container.NewTask(ctx, ioCreation) task, err := container.NewTask(ctx, ioCreation)
if err != nil { if err != nil {
return fmt.Errorf("failed to create containerd task: %v", err) return errors.Wrap(err, "failed to create containerd task")
} }
defer func() { defer func() {
if retErr != nil { if retErr != nil {
@ -133,7 +133,7 @@ func (c *criContainerdService) startContainer(ctx context.Context,
// Start containerd task. // Start containerd task.
if err := task.Start(ctx); err != nil { if err := task.Start(ctx); err != nil {
return fmt.Errorf("failed to start containerd task %q: %v", id, err) return errors.Wrapf(err, "failed to start containerd task %q", id)
} }
// Update container start timestamp. // Update container start timestamp.
@ -147,7 +147,7 @@ func createContainerLoggers(logPath string, tty bool) (stdout io.WriteCloser, st
if logPath != "" { if logPath != "" {
// Only generate container log when log path is specified. // Only generate container log when log path is specified.
if stdout, err = cio.NewCRILogger(logPath, cio.Stdout); err != nil { if stdout, err = cio.NewCRILogger(logPath, cio.Stdout); err != nil {
return nil, nil, fmt.Errorf("failed to start container stdout logger: %v", err) return nil, nil, errors.Wrap(err, "failed to start container stdout logger")
} }
defer func() { defer func() {
if err != nil { if err != nil {
@ -157,7 +157,7 @@ func createContainerLoggers(logPath string, tty bool) (stdout io.WriteCloser, st
// Only redirect stderr when there is no tty. // Only redirect stderr when there is no tty.
if !tty { if !tty {
if stderr, err = cio.NewCRILogger(logPath, cio.Stderr); err != nil { if stderr, err = cio.NewCRILogger(logPath, cio.Stderr); err != nil {
return nil, nil, fmt.Errorf("failed to start container stderr logger: %v", err) return nil, nil, errors.Wrap(err, "failed to start container stderr logger")
} }
} }
} else { } else {
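createContainerLoggers redirects the task's stdout and stderr into per-stream log writers, with stderr only captured when no TTY is attached. A loose stand-in using os/exec and a temporary file; the line format below is illustrative and is not the CRI log format the plugin actually writes:

package main

import (
	"bufio"
	"fmt"
	"io"
	"os"
	"os/exec"
	"time"
)

// logStream copies one stream line by line into w, tagging each line with a timestamp
// and the stream name.
func logStream(w io.Writer, name string, r io.Reader, done chan<- struct{}) {
	s := bufio.NewScanner(r)
	for s.Scan() {
		fmt.Fprintf(w, "%s %s %s\n", time.Now().Format(time.RFC3339Nano), name, s.Text())
	}
	done <- struct{}{}
}

func main() {
	logFile, err := os.CreateTemp("", "demo-*.log")
	if err != nil {
		panic(err)
	}
	defer logFile.Close()

	cmd := exec.Command("sh", "-c", "echo out; echo err 1>&2")
	stdout, _ := cmd.StdoutPipe()
	stderr, _ := cmd.StderrPipe()

	done := make(chan struct{}, 2)
	go logStream(logFile, "stdout", stdout, done)
	go logStream(logFile, "stderr", stderr, done)

	if err := cmd.Start(); err != nil {
		panic(err)
	}
	<-done
	<-done
	_ = cmd.Wait()
	fmt.Println("wrote", logFile.Name())
}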
@ -17,32 +17,31 @@ limitations under the License.
package server package server
import ( import (
"fmt"
tasks "github.com/containerd/containerd/api/services/tasks/v1" tasks "github.com/containerd/containerd/api/services/tasks/v1"
"github.com/pkg/errors"
"golang.org/x/net/context" "golang.org/x/net/context"
runtime "k8s.io/kubernetes/pkg/kubelet/apis/cri/runtime/v1alpha2" runtime "k8s.io/kubernetes/pkg/kubelet/apis/cri/runtime/v1alpha2"
) )
// ContainerStats returns stats of the container. If the container does not // ContainerStats returns stats of the container. If the container does not
// exist, the call returns an error. // exist, the call returns an error.
func (c *criContainerdService) ContainerStats(ctx context.Context, in *runtime.ContainerStatsRequest) (*runtime.ContainerStatsResponse, error) { func (c *criService) ContainerStats(ctx context.Context, in *runtime.ContainerStatsRequest) (*runtime.ContainerStatsResponse, error) {
cntr, err := c.containerStore.Get(in.GetContainerId()) cntr, err := c.containerStore.Get(in.GetContainerId())
if err != nil { if err != nil {
return nil, fmt.Errorf("failed to find container: %v", err) return nil, errors.Wrap(err, "failed to find container")
} }
request := &tasks.MetricsRequest{Filters: []string{"id==" + cntr.ID}} request := &tasks.MetricsRequest{Filters: []string{"id==" + cntr.ID}}
resp, err := c.client.TaskService().Metrics(ctx, request) resp, err := c.client.TaskService().Metrics(ctx, request)
if err != nil { if err != nil {
return nil, fmt.Errorf("failed to fetch metrics for task: %v", err) return nil, errors.Wrap(err, "failed to fetch metrics for task")
} }
if len(resp.Metrics) != 1 { if len(resp.Metrics) != 1 {
return nil, fmt.Errorf("unexpected metrics response: %+v", resp.Metrics) return nil, errors.Errorf("unexpected metrics response: %+v", resp.Metrics)
} }
cs, err := c.getContainerMetrics(cntr.Metadata, resp.Metrics[0]) cs, err := c.getContainerMetrics(cntr.Metadata, resp.Metrics[0])
if err != nil { if err != nil {
return nil, fmt.Errorf("failed to decode container metrics: %v", err) return nil, errors.Wrap(err, "failed to decode container metrics")
} }
return &runtime.ContainerStatsResponse{Stats: cs}, nil return &runtime.ContainerStatsResponse{Stats: cs}, nil
} }
@ -17,12 +17,11 @@ limitations under the License.
package server package server
import ( import (
"fmt"
"github.com/containerd/cgroups" "github.com/containerd/cgroups"
tasks "github.com/containerd/containerd/api/services/tasks/v1" tasks "github.com/containerd/containerd/api/services/tasks/v1"
"github.com/containerd/containerd/api/types" "github.com/containerd/containerd/api/types"
"github.com/containerd/typeurl" "github.com/containerd/typeurl"
"github.com/pkg/errors"
"golang.org/x/net/context" "golang.org/x/net/context"
runtime "k8s.io/kubernetes/pkg/kubelet/apis/cri/runtime/v1alpha2" runtime "k8s.io/kubernetes/pkg/kubelet/apis/cri/runtime/v1alpha2"
@ -30,26 +29,26 @@ import (
) )
// ListContainerStats returns stats of all running containers. // ListContainerStats returns stats of all running containers.
func (c *criContainerdService) ListContainerStats( func (c *criService) ListContainerStats(
ctx context.Context, ctx context.Context,
in *runtime.ListContainerStatsRequest, in *runtime.ListContainerStatsRequest,
) (*runtime.ListContainerStatsResponse, error) { ) (*runtime.ListContainerStatsResponse, error) {
request, containers, err := c.buildTaskMetricsRequest(in) request, containers, err := c.buildTaskMetricsRequest(in)
if err != nil { if err != nil {
return nil, fmt.Errorf("failed to build metrics request: %v", err) return nil, errors.Wrap(err, "failed to build metrics request")
} }
resp, err := c.client.TaskService().Metrics(ctx, &request) resp, err := c.client.TaskService().Metrics(ctx, &request)
if err != nil { if err != nil {
return nil, fmt.Errorf("failed to fetch metrics for tasks: %v", err) return nil, errors.Wrap(err, "failed to fetch metrics for tasks")
} }
criStats, err := c.toCRIContainerStats(resp.Metrics, containers) criStats, err := c.toCRIContainerStats(resp.Metrics, containers)
if err != nil { if err != nil {
return nil, fmt.Errorf("failed to convert to cri containerd stats format: %v", err) return nil, errors.Wrap(err, "failed to convert to cri containerd stats format")
} }
return criStats, nil return criStats, nil
} }
func (c *criContainerdService) toCRIContainerStats( func (c *criService) toCRIContainerStats(
stats []*types.Metric, stats []*types.Metric,
containers []containerstore.Container, containers []containerstore.Container,
) (*runtime.ListContainerStatsResponse, error) { ) (*runtime.ListContainerStatsResponse, error) {
@ -61,14 +60,14 @@ func (c *criContainerdService) toCRIContainerStats(
for _, cntr := range containers { for _, cntr := range containers {
cs, err := c.getContainerMetrics(cntr.Metadata, statsMap[cntr.ID]) cs, err := c.getContainerMetrics(cntr.Metadata, statsMap[cntr.ID])
if err != nil { if err != nil {
return nil, fmt.Errorf("failed to decode container metrics for %q: %v", cntr.ID, err) return nil, errors.Wrapf(err, "failed to decode container metrics for %q", cntr.ID)
} }
containerStats.Stats = append(containerStats.Stats, cs) containerStats.Stats = append(containerStats.Stats, cs)
} }
return containerStats, nil return containerStats, nil
} }
func (c *criContainerdService) getContainerMetrics( func (c *criService) getContainerMetrics(
meta containerstore.Metadata, meta containerstore.Metadata,
stats *types.Metric, stats *types.Metric,
) (*runtime.ContainerStats, error) { ) (*runtime.ContainerStats, error) {
@ -99,7 +98,7 @@ func (c *criContainerdService) getContainerMetrics(
if stats != nil { if stats != nil {
s, err := typeurl.UnmarshalAny(stats.Data) s, err := typeurl.UnmarshalAny(stats.Data)
if err != nil { if err != nil {
return nil, fmt.Errorf("failed to extract container metrics: %v", err) return nil, errors.Wrap(err, "failed to extract container metrics")
} }
metrics := s.(*cgroups.Metrics) metrics := s.(*cgroups.Metrics)
if metrics.CPU != nil && metrics.CPU.Usage != nil { if metrics.CPU != nil && metrics.CPU.Usage != nil {
@ -119,7 +118,7 @@ func (c *criContainerdService) getContainerMetrics(
return &cs, nil return &cs, nil
} }
func (c *criContainerdService) normalizeContainerStatsFilter(filter *runtime.ContainerStatsFilter) { func (c *criService) normalizeContainerStatsFilter(filter *runtime.ContainerStatsFilter) {
if cntr, err := c.containerStore.Get(filter.GetId()); err == nil { if cntr, err := c.containerStore.Get(filter.GetId()); err == nil {
filter.Id = cntr.ID filter.Id = cntr.ID
} }
@ -130,7 +129,7 @@ func (c *criContainerdService) normalizeContainerStatsFilter(filter *runtime.Con
// buildTaskMetricsRequest constructs a tasks.MetricsRequest based on // buildTaskMetricsRequest constructs a tasks.MetricsRequest based on
// the information in the stats request and the containerStore // the information in the stats request and the containerStore
func (c *criContainerdService) buildTaskMetricsRequest( func (c *criService) buildTaskMetricsRequest(
r *runtime.ListContainerStatsRequest, r *runtime.ListContainerStatsRequest,
) (tasks.MetricsRequest, []containerstore.Container, error) { ) (tasks.MetricsRequest, []containerstore.Container, error) {
var req tasks.MetricsRequest var req tasks.MetricsRequest
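buildTaskMetricsRequest narrows the tasks Metrics call to the requested containers using the same "id==<id>" filter syntax seen in ContainerStats above. A minimal sketch of assembling those filters; the request and container types here are reduced placeholders:

package main

import "fmt"

type container struct{ ID string }

// buildFilters returns one "id==<id>" filter per container of interest.
func buildFilters(containers []container) []string {
	var filters []string
	for _, c := range containers {
		filters = append(filters, "id=="+c.ID)
	}
	return filters
}

func main() {
	fmt.Println(buildFilters([]container{{ID: "abc"}, {ID: "def"}}))
}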
@ -18,21 +18,21 @@ package server
import ( import (
"encoding/json" "encoding/json"
"fmt"
runtimespec "github.com/opencontainers/runtime-spec/specs-go" runtimespec "github.com/opencontainers/runtime-spec/specs-go"
"github.com/sirupsen/logrus" "github.com/pkg/errors"
"golang.org/x/net/context" "golang.org/x/net/context"
runtime "k8s.io/kubernetes/pkg/kubelet/apis/cri/runtime/v1alpha2" runtime "k8s.io/kubernetes/pkg/kubelet/apis/cri/runtime/v1alpha2"
criconfig "github.com/containerd/cri/pkg/config"
containerstore "github.com/containerd/cri/pkg/store/container" containerstore "github.com/containerd/cri/pkg/store/container"
) )
// ContainerStatus inspects the container and returns the status. // ContainerStatus inspects the container and returns the status.
func (c *criContainerdService) ContainerStatus(ctx context.Context, r *runtime.ContainerStatusRequest) (*runtime.ContainerStatusResponse, error) { func (c *criService) ContainerStatus(ctx context.Context, r *runtime.ContainerStatusRequest) (*runtime.ContainerStatusResponse, error) {
container, err := c.containerStore.Get(r.GetContainerId()) container, err := c.containerStore.Get(r.GetContainerId())
if err != nil { if err != nil {
return nil, fmt.Errorf("an error occurred when try to find container %q: %v", r.GetContainerId(), err) return nil, errors.Wrapf(err, "an error occurred when try to find container %q", r.GetContainerId())
} }
// TODO(random-liu): Clean up the following logic in CRI. // TODO(random-liu): Clean up the following logic in CRI.
@ -44,7 +44,7 @@ func (c *criContainerdService) ContainerStatus(ctx context.Context, r *runtime.C
imageRef := container.ImageRef imageRef := container.ImageRef
image, err := c.imageStore.Get(imageRef) image, err := c.imageStore.Get(imageRef)
if err != nil { if err != nil {
return nil, fmt.Errorf("failed to get image %q: %v", imageRef, err) return nil, errors.Wrapf(err, "failed to get image %q", imageRef)
} }
if len(image.RepoTags) > 0 { if len(image.RepoTags) > 0 {
// Based on current behavior of dockershim, this field should be // Based on current behavior of dockershim, this field should be
@ -58,7 +58,7 @@ func (c *criContainerdService) ContainerStatus(ctx context.Context, r *runtime.C
status := toCRIContainerStatus(container, spec, imageRef) status := toCRIContainerStatus(container, spec, imageRef)
info, err := toCRIContainerInfo(ctx, container, r.GetVerbose()) info, err := toCRIContainerInfo(ctx, container, r.GetVerbose())
if err != nil { if err != nil {
return nil, fmt.Errorf("failed to get verbose container info: %v", err) return nil, errors.Wrap(err, "failed to get verbose container info")
} }
return &runtime.ContainerStatusResponse{ return &runtime.ContainerStatusResponse{
@ -106,6 +106,7 @@ type containerInfo struct {
Removing bool `json:"removing"` Removing bool `json:"removing"`
SnapshotKey string `json:"snapshotKey"` SnapshotKey string `json:"snapshotKey"`
Snapshotter string `json:"snapshotter"` Snapshotter string `json:"snapshotter"`
Runtime *criconfig.Runtime `json:"runtime"`
Config *runtime.ContainerConfig `json:"config"` Config *runtime.ContainerConfig `json:"config"`
RuntimeSpec *runtimespec.Spec `json:"runtimeSpec"` RuntimeSpec *runtimespec.Spec `json:"runtimeSpec"`
} }
@ -128,24 +129,28 @@ func toCRIContainerInfo(ctx context.Context, container containerstore.Container,
Config: meta.Config, Config: meta.Config,
} }
spec, err := container.Container.Spec(ctx) var err error
if err == nil { ci.RuntimeSpec, err = container.Container.Spec(ctx)
ci.RuntimeSpec = spec if err != nil {
} else { return nil, errors.Wrap(err, "failed to get container runtime spec")
logrus.WithError(err).Errorf("Failed to get container %q spec", container.ID)
} }
ctrInfo, err := container.Container.Info(ctx) ctrInfo, err := container.Container.Info(ctx)
if err == nil { if err != nil {
return nil, errors.Wrap(err, "failed to get container info")
}
ci.SnapshotKey = ctrInfo.SnapshotKey ci.SnapshotKey = ctrInfo.SnapshotKey
ci.Snapshotter = ctrInfo.Snapshotter ci.Snapshotter = ctrInfo.Snapshotter
} else {
logrus.WithError(err).Errorf("Failed to get container %q info", container.ID) ociRuntime, err := getRuntimeConfigFromContainerInfo(ctrInfo)
if err != nil {
return nil, errors.Wrap(err, "failed to get container runtime config")
} }
ci.Runtime = &ociRuntime
infoBytes, err := json.Marshal(ci) infoBytes, err := json.Marshal(ci)
if err != nil { if err != nil {
return nil, fmt.Errorf("failed to marshal info %v: %v", ci, err) return nil, errors.Wrapf(err, "failed to marshal info %v", ci)
} }
return map[string]string{ return map[string]string{
"info": string(infoBytes), "info": string(infoBytes),
@ -17,12 +17,12 @@ limitations under the License.
package server package server
import ( import (
"fmt"
"time" "time"
"github.com/containerd/containerd" "github.com/containerd/containerd"
"github.com/containerd/containerd/errdefs" "github.com/containerd/containerd/errdefs"
"github.com/docker/docker/pkg/signal" "github.com/docker/docker/pkg/signal"
"github.com/pkg/errors"
"github.com/sirupsen/logrus" "github.com/sirupsen/logrus"
"golang.org/x/net/context" "golang.org/x/net/context"
"golang.org/x/sys/unix" "golang.org/x/sys/unix"
@ -36,11 +36,11 @@ import (
const killContainerTimeout = 2 * time.Minute const killContainerTimeout = 2 * time.Minute
// StopContainer stops a running container with a grace period (i.e., timeout). // StopContainer stops a running container with a grace period (i.e., timeout).
func (c *criContainerdService) StopContainer(ctx context.Context, r *runtime.StopContainerRequest) (*runtime.StopContainerResponse, error) { func (c *criService) StopContainer(ctx context.Context, r *runtime.StopContainerRequest) (*runtime.StopContainerResponse, error) {
// Get container config from container store. // Get container config from container store.
container, err := c.containerStore.Get(r.GetContainerId()) container, err := c.containerStore.Get(r.GetContainerId())
if err != nil { if err != nil {
return nil, fmt.Errorf("an error occurred when try to find container %q: %v", r.GetContainerId(), err) return nil, errors.Wrapf(err, "an error occurred when try to find container %q", r.GetContainerId())
} }
if err := c.stopContainer(ctx, container, time.Duration(r.GetTimeout())*time.Second); err != nil { if err := c.stopContainer(ctx, container, time.Duration(r.GetTimeout())*time.Second); err != nil {
@ -51,7 +51,7 @@ func (c *criContainerdService) StopContainer(ctx context.Context, r *runtime.Sto
} }
// stopContainer stops a container based on the container metadata. // stopContainer stops a container based on the container metadata.
func (c *criContainerdService) stopContainer(ctx context.Context, container containerstore.Container, timeout time.Duration) error { func (c *criService) stopContainer(ctx context.Context, container containerstore.Container, timeout time.Duration) error {
id := container.ID id := container.ID
// Return without error if container is not running. This makes sure that // Return without error if container is not running. This makes sure that
@ -71,27 +71,27 @@ func (c *criContainerdService) stopContainer(ctx context.Context, container cont
// deleted and image is garbage collected before this point. However, // deleted and image is garbage collected before this point. However,
// the chance is really slim; even if it happens, it's still fine to return // the chance is really slim; even if it happens, it's still fine to return
// an error here. // an error here.
return fmt.Errorf("failed to get image metadata %q: %v", container.ImageRef, err) return errors.Wrapf(err, "failed to get image metadata %q", container.ImageRef)
} }
if image.ImageSpec.Config.StopSignal != "" { if image.ImageSpec.Config.StopSignal != "" {
stopSignal, err = signal.ParseSignal(image.ImageSpec.Config.StopSignal) stopSignal, err = signal.ParseSignal(image.ImageSpec.Config.StopSignal)
if err != nil { if err != nil {
return fmt.Errorf("failed to parse stop signal %q: %v", return errors.Wrapf(err, "failed to parse stop signal %q",
image.ImageSpec.Config.StopSignal, err) image.ImageSpec.Config.StopSignal)
} }
} }
logrus.Infof("Stop container %q with signal %v", id, stopSignal) logrus.Infof("Stop container %q with signal %v", id, stopSignal)
task, err := container.Container.Task(ctx, nil) task, err := container.Container.Task(ctx, nil)
if err != nil { if err != nil {
if !errdefs.IsNotFound(err) { if !errdefs.IsNotFound(err) {
return fmt.Errorf("failed to stop container, task not found for container %q: %v", id, err) return errors.Wrapf(err, "failed to stop container, task not found for container %q", id)
} }
return nil return nil
} }
if task != nil { if task != nil {
if err = task.Kill(ctx, stopSignal); err != nil { if err = task.Kill(ctx, stopSignal); err != nil {
if !errdefs.IsNotFound(err) { if !errdefs.IsNotFound(err) {
return fmt.Errorf("failed to stop container %q: %v", id, err) return errors.Wrapf(err, "failed to stop container %q", id)
} }
// Move on to make sure container status is updated. // Move on to make sure container status is updated.
} }
@ -107,7 +107,7 @@ func (c *criContainerdService) stopContainer(ctx context.Context, container cont
task, err := container.Container.Task(ctx, nil) task, err := container.Container.Task(ctx, nil)
if err != nil { if err != nil {
if !errdefs.IsNotFound(err) { if !errdefs.IsNotFound(err) {
return fmt.Errorf("failed to stop container, task not found for container %q: %v", id, err) return errors.Wrapf(err, "failed to stop container, task not found for container %q", id)
} }
return nil return nil
} }
@ -116,7 +116,7 @@ func (c *criContainerdService) stopContainer(ctx context.Context, container cont
if task != nil { if task != nil {
if err = task.Kill(ctx, unix.SIGKILL, containerd.WithKillAll); err != nil { if err = task.Kill(ctx, unix.SIGKILL, containerd.WithKillAll); err != nil {
if !errdefs.IsNotFound(err) { if !errdefs.IsNotFound(err) {
return fmt.Errorf("failed to kill container %q: %v", id, err) return errors.Wrapf(err, "failed to kill container %q", id)
} }
// Move on to make sure container status is updated. // Move on to make sure container status is updated.
} }
@ -124,20 +124,20 @@ func (c *criContainerdService) stopContainer(ctx context.Context, container cont
// Wait for a fixed timeout until container stop is observed by event monitor. // Wait for a fixed timeout until container stop is observed by event monitor.
if err := c.waitContainerStop(ctx, container, killContainerTimeout); err != nil { if err := c.waitContainerStop(ctx, container, killContainerTimeout); err != nil {
return fmt.Errorf("an error occurs during waiting for container %q to stop: %v", id, err) return errors.Wrapf(err, "an error occurs during waiting for container %q to stop", id)
} }
return nil return nil
} }
// waitContainerStop waits for the container to be stopped until the timeout is exceeded or the context is cancelled. // waitContainerStop waits for the container to be stopped until the timeout is exceeded or the context is cancelled.
func (c *criContainerdService) waitContainerStop(ctx context.Context, container containerstore.Container, timeout time.Duration) error { func (c *criService) waitContainerStop(ctx context.Context, container containerstore.Container, timeout time.Duration) error {
timeoutTimer := time.NewTimer(timeout) timeoutTimer := time.NewTimer(timeout)
defer timeoutTimer.Stop() defer timeoutTimer.Stop()
select { select {
case <-ctx.Done(): case <-ctx.Done():
return fmt.Errorf("wait container %q is cancelled", container.ID) return errors.Errorf("wait container %q is cancelled", container.ID)
case <-timeoutTimer.C: case <-timeoutTimer.C:
return fmt.Errorf("wait container %q stop timeout", container.ID) return errors.Errorf("wait container %q stop timeout", container.ID)
case <-container.Stopped(): case <-container.Stopped():
return nil return nil
} }
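The wait above is a plain timer-plus-channels select; whichever of the context, the timeout timer, or the stopped channel fires first decides the result. A self-contained sketch of the same pattern, using a stand-in stopped channel in place of container.Stopped():

package main

import (
	"context"
	"fmt"
	"time"
)

// waitStop mirrors the select used by waitContainerStop above.
func waitStop(ctx context.Context, stopped <-chan struct{}, timeout time.Duration) error {
	timer := time.NewTimer(timeout)
	defer timer.Stop()
	select {
	case <-ctx.Done():
		return fmt.Errorf("wait is cancelled")
	case <-timer.C:
		return fmt.Errorf("wait timed out")
	case <-stopped:
		return nil
	}
}

func main() {
	stopped := make(chan struct{})
	// Simulate the event monitor observing the exit after 100ms.
	go func() {
		time.Sleep(100 * time.Millisecond)
		close(stopped)
	}()
	fmt.Println(waitStop(context.Background(), stopped, time.Second)) // <nil>
}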

View File

@ -18,13 +18,13 @@ package server
import ( import (
gocontext "context" gocontext "context"
"fmt"
"github.com/containerd/containerd" "github.com/containerd/containerd"
"github.com/containerd/containerd/containers" "github.com/containerd/containerd/containers"
"github.com/containerd/containerd/errdefs" "github.com/containerd/containerd/errdefs"
"github.com/containerd/typeurl" "github.com/containerd/typeurl"
runtimespec "github.com/opencontainers/runtime-spec/specs-go" runtimespec "github.com/opencontainers/runtime-spec/specs-go"
"github.com/pkg/errors"
"github.com/sirupsen/logrus" "github.com/sirupsen/logrus"
"golang.org/x/net/context" "golang.org/x/net/context"
runtime "k8s.io/kubernetes/pkg/kubelet/apis/cri/runtime/v1alpha2" runtime "k8s.io/kubernetes/pkg/kubelet/apis/cri/runtime/v1alpha2"
@ -35,10 +35,10 @@ import (
) )
// UpdateContainerResources updates ContainerConfig of the container. // UpdateContainerResources updates ContainerConfig of the container.
func (c *criContainerdService) UpdateContainerResources(ctx context.Context, r *runtime.UpdateContainerResourcesRequest) (retRes *runtime.UpdateContainerResourcesResponse, retErr error) { func (c *criService) UpdateContainerResources(ctx context.Context, r *runtime.UpdateContainerResourcesRequest) (retRes *runtime.UpdateContainerResourcesResponse, retErr error) {
container, err := c.containerStore.Get(r.GetContainerId()) container, err := c.containerStore.Get(r.GetContainerId())
if err != nil { if err != nil {
return nil, fmt.Errorf("failed to find container: %v", err) return nil, errors.Wrap(err, "failed to find container")
} }
// Update resources in status update transaction, so that: // Update resources in status update transaction, so that:
// 1) There won't be race condition with container start. // 1) There won't be race condition with container start.
@ -46,19 +46,19 @@ func (c *criContainerdService) UpdateContainerResources(ctx context.Context, r *
if err := container.Status.Update(func(status containerstore.Status) (containerstore.Status, error) { if err := container.Status.Update(func(status containerstore.Status) (containerstore.Status, error) {
return status, c.updateContainerResources(ctx, container, r.GetLinux(), status) return status, c.updateContainerResources(ctx, container, r.GetLinux(), status)
}); err != nil { }); err != nil {
return nil, fmt.Errorf("failed to update resources: %v", err) return nil, errors.Wrap(err, "failed to update resources")
} }
return &runtime.UpdateContainerResourcesResponse{}, nil return &runtime.UpdateContainerResourcesResponse{}, nil
} }
func (c *criContainerdService) updateContainerResources(ctx context.Context, func (c *criService) updateContainerResources(ctx context.Context,
cntr containerstore.Container, cntr containerstore.Container,
resources *runtime.LinuxContainerResources, resources *runtime.LinuxContainerResources,
status containerstore.Status) (retErr error) { status containerstore.Status) (retErr error) {
id := cntr.ID id := cntr.ID
// Do not update the container when there is a removal in progress. // Do not update the container when there is a removal in progress.
if status.Removing { if status.Removing {
return fmt.Errorf("container %q is in removing state", id) return errors.Errorf("container %q is in removing state", id)
} }
// Update container spec. If the container is not started yet, updating // Update container spec. If the container is not started yet, updating
@ -67,11 +67,11 @@ func (c *criContainerdService) updateContainerResources(ctx context.Context,
// the spec will become our source of truth for resource limits. // the spec will become our source of truth for resource limits.
oldSpec, err := cntr.Container.Spec(ctx) oldSpec, err := cntr.Container.Spec(ctx)
if err != nil { if err != nil {
return fmt.Errorf("failed to get container spec: %v", err) return errors.Wrap(err, "failed to get container spec")
} }
newSpec, err := updateOCILinuxResource(oldSpec, resources) newSpec, err := updateOCILinuxResource(oldSpec, resources)
if err != nil { if err != nil {
return fmt.Errorf("failed to update resource in spec: %v", err) return errors.Wrap(err, "failed to update resource in spec")
} }
if err := updateContainerSpec(ctx, cntr.Container, newSpec); err != nil { if err := updateContainerSpec(ctx, cntr.Container, newSpec); err != nil {
@ -100,7 +100,7 @@ func (c *criContainerdService) updateContainerResources(ctx context.Context,
// Task exited already. // Task exited already.
return nil return nil
} }
return fmt.Errorf("failed to get task: %v", err) return errors.Wrap(err, "failed to get task")
} }
// newSpec.Linux won't be nil // newSpec.Linux won't be nil
if err := task.Update(ctx, containerd.WithResources(newSpec.Linux.Resources)); err != nil { if err := task.Update(ctx, containerd.WithResources(newSpec.Linux.Resources)); err != nil {
@ -108,7 +108,7 @@ func (c *criContainerdService) updateContainerResources(ctx context.Context,
// Task exited already. // Task exited already.
return nil return nil
} }
return fmt.Errorf("failed to update resources: %v", err) return errors.Wrap(err, "failed to update resources")
} }
return nil return nil
} }
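For reference, this is roughly the kind of request a kubelet-style caller hands to UpdateContainerResources: only the Linux resource fields are filled in, and after the update the on-disk spec becomes the source of truth for limits. A hedged sketch, assuming the CRI v1alpha2 field names (CpuPeriod, CpuQuota, MemoryLimitInBytes) and a made-up container ID:

package main

import (
	"fmt"

	runtime "k8s.io/kubernetes/pkg/kubelet/apis/cri/runtime/v1alpha2"
)

func main() {
	req := &runtime.UpdateContainerResourcesRequest{
		ContainerId: "abcdef123456", // hypothetical container ID
		Linux: &runtime.LinuxContainerResources{
			CpuPeriod:          100000,
			CpuQuota:           50000, // roughly half a CPU
			MemoryLimitInBytes: 256 * 1024 * 1024,
		},
	}
	fmt.Printf("%+v\n", req)
}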
@ -117,13 +117,13 @@ func (c *criContainerdService) updateContainerResources(ctx context.Context,
func updateContainerSpec(ctx context.Context, cntr containerd.Container, spec *runtimespec.Spec) error { func updateContainerSpec(ctx context.Context, cntr containerd.Container, spec *runtimespec.Spec) error {
any, err := typeurl.MarshalAny(spec) any, err := typeurl.MarshalAny(spec)
if err != nil { if err != nil {
return fmt.Errorf("failed to marshal spec %+v: %v", spec, err) return errors.Wrapf(err, "failed to marshal spec %+v", spec)
} }
if err := cntr.Update(ctx, func(ctx gocontext.Context, client *containerd.Client, c *containers.Container) error { if err := cntr.Update(ctx, func(ctx gocontext.Context, client *containerd.Client, c *containers.Container) error {
c.Spec = any c.Spec = any
return nil return nil
}); err != nil { }); err != nil {
return fmt.Errorf("failed to update container spec: %v", err) return errors.Wrap(err, "failed to update container spec")
} }
return nil return nil
} }
@ -133,7 +133,7 @@ func updateOCILinuxResource(spec *runtimespec.Spec, new *runtime.LinuxContainerR
// Copy to make sure old spec is not changed. // Copy to make sure old spec is not changed.
var cloned runtimespec.Spec var cloned runtimespec.Spec
if err := util.DeepCopy(&cloned, spec); err != nil { if err := util.DeepCopy(&cloned, spec); err != nil {
return nil, fmt.Errorf("failed to deep copy: %v", err) return nil, errors.Wrap(err, "failed to deep copy")
} }
g := newSpecGenerator(&cloned) g := newSpecGenerator(&cloned)

View File

@ -17,15 +17,18 @@ limitations under the License.
package server package server
import ( import (
"errors" "time"
eventtypes "github.com/containerd/containerd/api/events" eventtypes "github.com/containerd/containerd/api/events"
containerdio "github.com/containerd/containerd/cio" containerdio "github.com/containerd/containerd/cio"
"github.com/containerd/containerd/errdefs" "github.com/containerd/containerd/errdefs"
"github.com/containerd/containerd/events" "github.com/containerd/containerd/events"
"github.com/containerd/typeurl" "github.com/containerd/typeurl"
gogotypes "github.com/gogo/protobuf/types"
"github.com/pkg/errors"
"github.com/sirupsen/logrus" "github.com/sirupsen/logrus"
"golang.org/x/net/context" "golang.org/x/net/context"
"k8s.io/apimachinery/pkg/util/clock"
ctrdutil "github.com/containerd/cri/pkg/containerd/util" ctrdutil "github.com/containerd/cri/pkg/containerd/util"
"github.com/containerd/cri/pkg/store" "github.com/containerd/cri/pkg/store"
@ -33,6 +36,12 @@ import (
sandboxstore "github.com/containerd/cri/pkg/store/sandbox" sandboxstore "github.com/containerd/cri/pkg/store/sandbox"
) )
const (
backOffInitDuration = 1 * time.Second
backOffMaxDuration = 5 * time.Minute
backOffExpireCheckDuration = 1 * time.Second
)
// eventMonitor monitors containerd events and updates internal state accordingly. // eventMonitor monitors containerd events and updates internal state accordingly.
// TODO(random-liu): [P1] Figure out whether it is possible to drop events while containerd // TODO(random-liu): [P1] Figure out whether it is possible to drop events while containerd
// is running. If it is, we should periodically list to sync state with containerd. // is running. If it is, we should periodically list to sync state with containerd.
@ -43,6 +52,23 @@ type eventMonitor struct {
errCh <-chan error errCh <-chan error
ctx context.Context ctx context.Context
cancel context.CancelFunc cancel context.CancelFunc
backOff *backOff
}
type backOff struct {
queuePool map[string]*backOffQueue
ticker *time.Ticker
minDuration time.Duration
maxDuration time.Duration
checkDuration time.Duration
clock clock.Clock
}
type backOffQueue struct {
events []interface{}
expireTime time.Time
duration time.Duration
clock clock.Clock
} }
// Create new event monitor. New event monitor will start subscribing to containerd events. All events // Create new event monitor. New event monitor will start subscribing to containerd events. All events
@ -55,6 +81,7 @@ func newEventMonitor(c *containerstore.Store, s *sandboxstore.Store) *eventMonit
sandboxStore: s, sandboxStore: s,
ctx: ctx, ctx: ctx,
cancel: cancel, cancel: cancel,
backOff: newBackOff(),
} }
} }
@ -67,6 +94,24 @@ func (em *eventMonitor) subscribe(subscriber events.Subscriber) {
em.ch, em.errCh = subscriber.Subscribe(em.ctx, filters...) em.ch, em.errCh = subscriber.Subscribe(em.ctx, filters...)
} }
func convertEvent(e *gogotypes.Any) (string, interface{}, error) {
containerID := ""
evt, err := typeurl.UnmarshalAny(e)
if err != nil {
return "", nil, errors.Wrap(err, "failed to unmarshalany")
}
switch evt.(type) {
case *eventtypes.TaskExit:
containerID = evt.(*eventtypes.TaskExit).ContainerID
case *eventtypes.TaskOOM:
containerID = evt.(*eventtypes.TaskOOM).ContainerID
default:
return "", nil, errors.New("unsupported event")
}
return containerID, evt, nil
}
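convertEvent simply unwraps the protobuf Any and pulls the container ID out of the concrete event type. A rough round-trip sketch of the same steps, assuming the event types resolve through typeurl's proto registry (as containerd's API event types do); convertEvent itself is unexported, so the unwrap is repeated inline here:

package main

import (
	"fmt"

	eventtypes "github.com/containerd/containerd/api/events"
	"github.com/containerd/typeurl"
)

func main() {
	// Wrap a TaskExit the way containerd publishes it...
	exit := &eventtypes.TaskExit{ContainerID: "abc", Pid: 1234, ExitStatus: 0}
	any, err := typeurl.MarshalAny(exit)
	if err != nil {
		panic(err)
	}
	// ...then unpack it the way convertEvent does.
	evt, err := typeurl.UnmarshalAny(any)
	if err != nil {
		panic(err)
	}
	switch e := evt.(type) {
	case *eventtypes.TaskExit:
		fmt.Println("task exit for container", e.ContainerID)
	case *eventtypes.TaskOOM:
		fmt.Println("task OOM for container", e.ContainerID)
	default:
		fmt.Println("unsupported event")
	}
}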
// start starts the event monitor which monitors and handles all container events. It returns // start starts the event monitor which monitors and handles all container events. It returns
// a channel for the caller to wait for the event monitor to stop. start must be called after // a channel for the caller to wait for the event monitor to stop. start must be called after
// subscribe. // subscribe.
@ -76,15 +121,41 @@ func (em *eventMonitor) start() (<-chan struct{}, error) {
} }
closeCh := make(chan struct{}) closeCh := make(chan struct{})
go func() { go func() {
backOffCheckCh := em.backOff.start()
for { for {
select { select {
case e := <-em.ch: case e := <-em.ch:
logrus.Debugf("Received containerd event timestamp - %v, namespace - %q, topic - %q", e.Timestamp, e.Namespace, e.Topic) logrus.Debugf("Received containerd event timestamp - %v, namespace - %q, topic - %q", e.Timestamp, e.Namespace, e.Topic)
em.handleEvent(e) cID, evt, err := convertEvent(e.Event)
if err != nil {
logrus.WithError(err).Errorf("Failed to convert event %+v", e)
break
}
if em.backOff.isInBackOff(cID) {
logrus.Infof("Events for container %q is in backoff, enqueue event %+v", cID, evt)
em.backOff.enBackOff(cID, evt)
break
}
if err := em.handleEvent(evt); err != nil {
logrus.WithError(err).Errorf("Failed to handle event %+v for container %s", evt, cID)
em.backOff.enBackOff(cID, evt)
}
case err := <-em.errCh: case err := <-em.errCh:
logrus.WithError(err).Error("Failed to handle event stream") logrus.WithError(err).Error("Failed to handle event stream")
close(closeCh) close(closeCh)
return return
case <-backOffCheckCh:
cIDs := em.backOff.getExpiredContainers()
for _, cID := range cIDs {
queue := em.backOff.deBackOff(cID)
for i, any := range queue.events {
if err := em.handleEvent(any); err != nil {
logrus.WithError(err).Errorf("Failed to handle backOff event %+v for container %s", any, cID)
em.backOff.reBackOff(cID, queue.events[i:], queue.duration)
break
}
}
}
} }
} }
}() }()
@ -94,17 +165,13 @@ func (em *eventMonitor) start() (<-chan struct{}, error) {
// stop stops the event monitor. It will close the event channel. // stop stops the event monitor. It will close the event channel.
// Once event monitor is stopped, it can't be started. // Once event monitor is stopped, it can't be started.
func (em *eventMonitor) stop() { func (em *eventMonitor) stop() {
em.backOff.stop()
em.cancel() em.cancel()
} }
// handleEvent handles a containerd event. // handleEvent handles a containerd event.
func (em *eventMonitor) handleEvent(evt *events.Envelope) { func (em *eventMonitor) handleEvent(any interface{}) error {
ctx := ctrdutil.NamespacedContext() ctx := ctrdutil.NamespacedContext()
any, err := typeurl.UnmarshalAny(evt.Event)
if err != nil {
logrus.WithError(err).Errorf("Failed to convert event envelope %+v", evt)
return
}
switch any.(type) { switch any.(type) {
// If containerd-shim exits unexpectedly, there will be no corresponding event. // If containerd-shim exits unexpectedly, there will be no corresponding event.
// However, containerd could not retrieve container state in that case, so it's // However, containerd could not retrieve container state in that case, so it's
@ -112,51 +179,59 @@ func (em *eventMonitor) handleEvent(evt *events.Envelope) {
// TODO(random-liu): [P2] Handle containerd-shim exit. // TODO(random-liu): [P2] Handle containerd-shim exit.
case *eventtypes.TaskExit: case *eventtypes.TaskExit:
e := any.(*eventtypes.TaskExit) e := any.(*eventtypes.TaskExit)
logrus.Infof("TaskExit event %+v", e)
cntr, err := em.containerStore.Get(e.ContainerID) cntr, err := em.containerStore.Get(e.ContainerID)
if err == nil { if err == nil {
handleContainerExit(ctx, e, cntr) if err := handleContainerExit(ctx, e, cntr); err != nil {
return return errors.Wrap(err, "failed to handle container TaskExit event")
}
return nil
} else if err != store.ErrNotExist { } else if err != store.ErrNotExist {
logrus.WithError(err).Errorf("Failed to get container %q", e.ContainerID) return errors.Wrap(err, "can't find container for TaskExit event")
return
} }
// Use GetAll to include sandbox in unknown state. // Use GetAll to include sandbox in unknown state.
sb, err := em.sandboxStore.GetAll(e.ContainerID) sb, err := em.sandboxStore.GetAll(e.ContainerID)
if err == nil { if err == nil {
handleSandboxExit(ctx, e, sb) if err := handleSandboxExit(ctx, e, sb); err != nil {
return return errors.Wrap(err, "failed to handle sandbox TaskExit event")
} else if err != store.ErrNotExist {
logrus.WithError(err).Errorf("Failed to get sandbox %q", e.ContainerID)
return
} }
return nil
} else if err != store.ErrNotExist {
return errors.Wrap(err, "can't find sandbox for TaskExit event")
}
return nil
case *eventtypes.TaskOOM: case *eventtypes.TaskOOM:
e := any.(*eventtypes.TaskOOM) e := any.(*eventtypes.TaskOOM)
logrus.Infof("TaskOOM event %+v", e) logrus.Infof("TaskOOM event %+v", e)
cntr, err := em.containerStore.Get(e.ContainerID) cntr, err := em.containerStore.Get(e.ContainerID)
if err != nil { if err != nil {
if _, err := em.sandboxStore.Get(e.ContainerID); err == nil { if err != store.ErrNotExist {
return return errors.Wrap(err, "can't find container for TaskOOM event")
} }
logrus.WithError(err).Errorf("Failed to get container %q", e.ContainerID) if _, err = em.sandboxStore.Get(e.ContainerID); err != nil {
return if err != store.ErrNotExist {
return errors.Wrap(err, "can't find sandbox for TaskOOM event")
}
return nil
}
return nil
} }
err = cntr.Status.UpdateSync(func(status containerstore.Status) (containerstore.Status, error) { err = cntr.Status.UpdateSync(func(status containerstore.Status) (containerstore.Status, error) {
status.Reason = oomExitReason status.Reason = oomExitReason
return status, nil return status, nil
}) })
if err != nil { if err != nil {
logrus.WithError(err).Errorf("Failed to update container %q oom", e.ContainerID) return errors.Wrap(err, "failed to update container status for TaskOOM event")
return
} }
} }
return nil
} }
// handleContainerExit handles TaskExit event for container. // handleContainerExit handles TaskExit event for container.
func handleContainerExit(ctx context.Context, e *eventtypes.TaskExit, cntr containerstore.Container) { func handleContainerExit(ctx context.Context, e *eventtypes.TaskExit, cntr containerstore.Container) error {
if e.Pid != cntr.Status.Get().Pid { if e.Pid != cntr.Status.Get().Pid {
// Non-init process died, ignore the event. // Non-init process died, ignore the event.
return return nil
} }
// Attach container IO so that `Delete` could cleanup the stream properly. // Attach container IO so that `Delete` could cleanup the stream properly.
task, err := cntr.Container.Task(ctx, task, err := cntr.Container.Task(ctx,
@ -166,16 +241,13 @@ func handleContainerExit(ctx context.Context, e *eventtypes.TaskExit, cntr conta
) )
if err != nil { if err != nil {
if !errdefs.IsNotFound(err) { if !errdefs.IsNotFound(err) {
logrus.WithError(err).Errorf("failed to load task for container %q", e.ContainerID) return errors.Wrapf(err, "failed to load task for container")
return
} }
} else { } else {
// TODO(random-liu): [P1] This may block the loop, we may want to spawn a worker // TODO(random-liu): [P1] This may block the loop, we may want to spawn a worker
if _, err = task.Delete(ctx); err != nil { if _, err = task.Delete(ctx); err != nil {
// TODO(random-liu): [P0] Enqueue the event and retry.
if !errdefs.IsNotFound(err) { if !errdefs.IsNotFound(err) {
logrus.WithError(err).Errorf("failed to stop container %q", e.ContainerID) return errors.Wrap(err, "failed to stop container")
return
} }
// Move on to make sure container status is updated. // Move on to make sure container status is updated.
} }
@ -192,34 +264,30 @@ func handleContainerExit(ctx context.Context, e *eventtypes.TaskExit, cntr conta
return status, nil return status, nil
}) })
if err != nil { if err != nil {
logrus.WithError(err).Errorf("Failed to update container %q state", e.ContainerID) return errors.Wrap(err, "failed to update container state")
// TODO(random-liu): [P0] Enqueue the event and retry.
return
} }
// Using channel to propagate the information of container stop // Using channel to propagate the information of container stop
cntr.Stop() cntr.Stop()
return nil
} }
// handleSandboxExit handles TaskExit event for sandbox. // handleSandboxExit handles TaskExit event for sandbox.
func handleSandboxExit(ctx context.Context, e *eventtypes.TaskExit, sb sandboxstore.Sandbox) { func handleSandboxExit(ctx context.Context, e *eventtypes.TaskExit, sb sandboxstore.Sandbox) error {
if e.Pid != sb.Status.Get().Pid { if e.Pid != sb.Status.Get().Pid {
// Non-init process died, ignore the event. // Non-init process died, ignore the event.
return return nil
} }
// No stream attached to sandbox container. // No stream attached to sandbox container.
task, err := sb.Container.Task(ctx, nil) task, err := sb.Container.Task(ctx, nil)
if err != nil { if err != nil {
if !errdefs.IsNotFound(err) { if !errdefs.IsNotFound(err) {
logrus.WithError(err).Errorf("failed to load task for sandbox %q", e.ContainerID) return errors.Wrap(err, "failed to load task for sandbox")
return
} }
} else { } else {
// TODO(random-liu): [P1] This may block the loop, we may want to spawn a worker // TODO(random-liu): [P1] This may block the loop, we may want to spawn a worker
if _, err = task.Delete(ctx); err != nil { if _, err = task.Delete(ctx); err != nil {
// TODO(random-liu): [P0] Enqueue the event and retry.
if !errdefs.IsNotFound(err) { if !errdefs.IsNotFound(err) {
logrus.WithError(err).Errorf("failed to stop sandbox %q", e.ContainerID) return errors.Wrap(err, "failed to stop sandbox")
return
} }
// Move on to make sure container status is updated. // Move on to make sure container status is updated.
} }
@ -238,10 +306,84 @@ func handleSandboxExit(ctx context.Context, e *eventtypes.TaskExit, sb sandboxst
return status, nil return status, nil
}) })
if err != nil { if err != nil {
logrus.WithError(err).Errorf("Failed to update sandbox %q state", e.ContainerID) return errors.Wrap(err, "failed to update sandbox state")
// TODO(random-liu): [P0] Enqueue the event and retry.
return
} }
// Using channel to propagate the information of sandbox stop // Using channel to propagate the information of sandbox stop
sb.Stop() sb.Stop()
return nil
}
func newBackOff() *backOff {
return &backOff{
queuePool: map[string]*backOffQueue{},
minDuration: backOffInitDuration,
maxDuration: backOffMaxDuration,
checkDuration: backOffExpireCheckDuration,
clock: clock.RealClock{},
}
}
func (b *backOff) getExpiredContainers() []string {
var containers []string
for c, q := range b.queuePool {
if q.isExpire() {
containers = append(containers, c)
}
}
return containers
}
func (b *backOff) isInBackOff(key string) bool {
if _, ok := b.queuePool[key]; ok {
return true
}
return false
}
// enBackOff starts a back-off for the container and appends the event to the tail of its queue.
func (b *backOff) enBackOff(key string, evt interface{}) {
if queue, ok := b.queuePool[key]; ok {
queue.events = append(queue.events, evt)
return
}
b.queuePool[key] = newBackOffQueue([]interface{}{evt}, b.minDuration, b.clock)
}
// deBackOff removes the container's whole queue from the pool and returns it.
func (b *backOff) deBackOff(key string) *backOffQueue {
queue := b.queuePool[key]
delete(b.queuePool, key)
return queue
}
// reBackOff starts the back-off again with a doubled (capped) duration and re-queues the events.
func (b *backOff) reBackOff(key string, events []interface{}, oldDuration time.Duration) {
duration := 2 * oldDuration
if duration > b.maxDuration {
duration = b.maxDuration
}
b.queuePool[key] = newBackOffQueue(events, duration, b.clock)
}
func (b *backOff) start() <-chan time.Time {
b.ticker = time.NewTicker(b.checkDuration)
return b.ticker.C
}
func (b *backOff) stop() {
b.ticker.Stop()
}
func newBackOffQueue(events []interface{}, init time.Duration, c clock.Clock) *backOffQueue {
return &backOffQueue{
events: events,
duration: init,
expireTime: c.Now().Add(init),
clock: c,
}
}
func (q *backOffQueue) isExpire() bool {
// return time.Now >= expireTime
return !q.clock.Now().Before(q.expireTime)
} }

View File

@ -19,14 +19,16 @@ package server
import ( import (
"encoding/json" "encoding/json"
"fmt" "fmt"
"os/exec"
"path" "path"
"path/filepath" "path/filepath"
"strconv" "strconv"
"strings" "strings"
"github.com/containerd/containerd" "github.com/containerd/containerd"
"github.com/containerd/containerd/containers"
"github.com/containerd/containerd/content" "github.com/containerd/containerd/content"
"github.com/containerd/containerd/linux/runctypes"
"github.com/containerd/typeurl"
"github.com/docker/distribution/reference" "github.com/docker/distribution/reference"
imagedigest "github.com/opencontainers/go-digest" imagedigest "github.com/opencontainers/go-digest"
"github.com/opencontainers/image-spec/identity" "github.com/opencontainers/image-spec/identity"
@ -35,10 +37,11 @@ import (
"github.com/opencontainers/runtime-tools/generate" "github.com/opencontainers/runtime-tools/generate"
"github.com/opencontainers/selinux/go-selinux" "github.com/opencontainers/selinux/go-selinux"
"github.com/opencontainers/selinux/go-selinux/label" "github.com/opencontainers/selinux/go-selinux/label"
"github.com/pkg/errors"
"golang.org/x/net/context" "golang.org/x/net/context"
runtime "k8s.io/kubernetes/pkg/kubelet/apis/cri/runtime/v1alpha2" runtime "k8s.io/kubernetes/pkg/kubelet/apis/cri/runtime/v1alpha2"
"k8s.io/kubernetes/pkg/util/sysctl"
criconfig "github.com/containerd/cri/pkg/config"
"github.com/containerd/cri/pkg/store" "github.com/containerd/cri/pkg/store"
imagestore "github.com/containerd/cri/pkg/store/image" imagestore "github.com/containerd/cri/pkg/store/image"
"github.com/containerd/cri/pkg/util" "github.com/containerd/cri/pkg/util"
@ -109,6 +112,14 @@ const (
containerMetadataExtension = criContainerdPrefix + ".container.metadata" containerMetadataExtension = criContainerdPrefix + ".container.metadata"
) )
const (
// defaultIfName is the default network interface for the pods
defaultIfName = "eth0"
// networkAttachCount is the minimum number of networks the PodSandbox
// attaches to
networkAttachCount = 2
)
// makeSandboxName generates sandbox name from sandbox metadata. The name // makeSandboxName generates sandbox name from sandbox metadata. The name
// generated is unique as long as sandbox metadata is unique. // generated is unique as long as sandbox metadata is unique.
func makeSandboxName(s *runtime.PodSandboxMetadata) string { func makeSandboxName(s *runtime.PodSandboxMetadata) string {
@ -126,9 +137,9 @@ func makeSandboxName(s *runtime.PodSandboxMetadata) string {
func makeContainerName(c *runtime.ContainerMetadata, s *runtime.PodSandboxMetadata) string { func makeContainerName(c *runtime.ContainerMetadata, s *runtime.PodSandboxMetadata) string {
return strings.Join([]string{ return strings.Join([]string{
c.Name, // 0 c.Name, // 0
s.Name, // 1: sandbox name s.Name, // 1: pod name
s.Namespace, // 2: sandbox namespace s.Namespace, // 2: pod namespace
s.Uid, // 3: sandbox uid s.Uid, // 3: pod uid
fmt.Sprintf("%d", c.Attempt), // 4 fmt.Sprintf("%d", c.Attempt), // 4
}, nameDelimiter) }, nameDelimiter)
} }
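Assuming nameDelimiter is "_" (the delimiter the CRI plugin uses; the constant itself is defined elsewhere in this package), the joined fields above produce names like the one below. The metadata values are made up for illustration:

package main

import (
	"fmt"
	"strings"
)

func main() {
	const nameDelimiter = "_" // assumed value
	// container name, pod name, pod namespace, pod uid, attempt
	name := strings.Join([]string{"nginx", "mypod", "default", "5e6f7a8b", "0"}, nameDelimiter)
	fmt.Println(name) // nginx_mypod_default_5e6f7a8b_0
}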
@ -145,29 +156,42 @@ func getCgroupsPath(cgroupsParent, id string, systemdCgroup bool) string {
} }
// getSandboxRootDir returns the root directory for managing sandbox files, // getSandboxRootDir returns the root directory for managing sandbox files,
// e.g. named pipes. // e.g. hosts files.
func getSandboxRootDir(rootDir, id string) string { func (c *criService) getSandboxRootDir(id string) string {
return filepath.Join(rootDir, sandboxesDir, id) return filepath.Join(c.config.RootDir, sandboxesDir, id)
} }
// getContainerRootDir returns the root directory for managing container files. // getVolatileSandboxRootDir returns the root directory for managing volatile sandbox files,
func getContainerRootDir(rootDir, id string) string { // e.g. named pipes.
return filepath.Join(rootDir, containersDir, id) func (c *criService) getVolatileSandboxRootDir(id string) string {
return filepath.Join(c.config.StateDir, sandboxesDir, id)
}
// getContainerRootDir returns the root directory for managing container files,
// e.g. state checkpoint.
func (c *criService) getContainerRootDir(id string) string {
return filepath.Join(c.config.RootDir, containersDir, id)
}
// getVolatileContainerRootDir returns the root directory for managing volatile container files,
// e.g. named pipes.
func (c *criService) getVolatileContainerRootDir(id string) string {
return filepath.Join(c.config.StateDir, containersDir, id)
} }
// getSandboxHosts returns the hosts file path inside the sandbox root directory. // getSandboxHosts returns the hosts file path inside the sandbox root directory.
func getSandboxHosts(sandboxRootDir string) string { func (c *criService) getSandboxHosts(id string) string {
return filepath.Join(sandboxRootDir, "hosts") return filepath.Join(c.getSandboxRootDir(id), "hosts")
} }
// getResolvPath returns resolv.conf filepath for specified sandbox. // getResolvPath returns resolv.conf filepath for specified sandbox.
func getResolvPath(sandboxRoot string) string { func (c *criService) getResolvPath(id string) string {
return filepath.Join(sandboxRoot, "resolv.conf") return filepath.Join(c.getSandboxRootDir(id), "resolv.conf")
} }
// getSandboxDevShm returns the shm file path inside the sandbox root directory. // getSandboxDevShm returns the shm file path inside the sandbox root directory.
func getSandboxDevShm(sandboxRootDir string) string { func (c *criService) getSandboxDevShm(id string) string {
return filepath.Join(sandboxRootDir, "shm") return filepath.Join(c.getVolatileSandboxRootDir(id), "shm")
} }
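The split above keeps checkpoint-style data under the persistent root directory and recreatable data (shm, named pipes) under the state directory. A sketch of the resulting layout, assuming the plugin's usual default directories and "sandboxes"/"containers" as the sub-directory names; the real values come from the CRI plugin config:

package main

import (
	"fmt"
	"path/filepath"
)

func main() {
	rootDir := "/var/lib/containerd/io.containerd.grpc.v1.cri" // assumed default RootDir
	stateDir := "/run/containerd/io.containerd.grpc.v1.cri"    // assumed default StateDir
	id := "abc123"

	fmt.Println(filepath.Join(rootDir, "sandboxes", id))         // persistent sandbox files, e.g. hosts, resolv.conf
	fmt.Println(filepath.Join(stateDir, "sandboxes", id, "shm")) // volatile sandbox files, e.g. shm
	fmt.Println(filepath.Join(rootDir, "containers", id))        // persistent container files
	fmt.Println(filepath.Join(stateDir, "containers", id))       // volatile container files, e.g. named pipes
}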
// getNetworkNamespace returns the network namespace of a process. // getNetworkNamespace returns the network namespace of a process.
@ -212,7 +236,7 @@ func getRepoDigestAndTag(namedRef reference.Named, digest imagedigest.Digest, sc
// localResolve resolves image reference locally and returns corresponding image metadata. It returns // localResolve resolves image reference locally and returns corresponding image metadata. It returns
// nil without error if the reference doesn't exist. // nil without error if the reference doesn't exist.
func (c *criContainerdService) localResolve(ctx context.Context, refOrID string) (*imagestore.Image, error) { func (c *criService) localResolve(ctx context.Context, refOrID string) (*imagestore.Image, error) {
getImageID := func(refOrId string) string { getImageID := func(refOrId string) string {
if _, err := imagedigest.Parse(refOrID); err == nil { if _, err := imagedigest.Parse(refOrID); err == nil {
return refOrID return refOrID
@ -245,7 +269,7 @@ func (c *criContainerdService) localResolve(ctx context.Context, refOrID string)
if err == store.ErrNotExist { if err == store.ErrNotExist {
return nil, nil return nil, nil
} }
return nil, fmt.Errorf("failed to get image %q : %v", imageID, err) return nil, errors.Wrapf(err, "failed to get image %q", imageID)
} }
return &image, nil return &image, nil
} }
@ -271,10 +295,10 @@ func getUserFromImage(user string) (*int64, string) {
// ensureImageExists returns the corresponding metadata of the image reference; if the image is not // ensureImageExists returns the corresponding metadata of the image reference; if the image is not
// pulled yet, the function will pull the image. // pulled yet, the function will pull the image.
func (c *criContainerdService) ensureImageExists(ctx context.Context, ref string) (*imagestore.Image, error) { func (c *criService) ensureImageExists(ctx context.Context, ref string) (*imagestore.Image, error) {
image, err := c.localResolve(ctx, ref) image, err := c.localResolve(ctx, ref)
if err != nil { if err != nil {
return nil, fmt.Errorf("failed to resolve image %q: %v", ref, err) return nil, errors.Wrapf(err, "failed to resolve image %q", ref)
} }
if image != nil { if image != nil {
return image, nil return image, nil
@ -282,13 +306,13 @@ func (c *criContainerdService) ensureImageExists(ctx context.Context, ref string
// Pull image to ensure the image exists // Pull image to ensure the image exists
resp, err := c.PullImage(ctx, &runtime.PullImageRequest{Image: &runtime.ImageSpec{Image: ref}}) resp, err := c.PullImage(ctx, &runtime.PullImageRequest{Image: &runtime.ImageSpec{Image: ref}})
if err != nil { if err != nil {
return nil, fmt.Errorf("failed to pull image %q: %v", ref, err) return nil, errors.Wrapf(err, "failed to pull image %q", ref)
} }
imageID := resp.GetImageRef() imageID := resp.GetImageRef()
newImage, err := c.imageStore.Get(imageID) newImage, err := c.imageStore.Get(imageID)
if err != nil { if err != nil {
// It's still possible that someone removed the image right after it is pulled. // It's still possible that someone removed the image right after it is pulled.
return nil, fmt.Errorf("failed to get image %q metadata after pulling: %v", imageID, err) return nil, errors.Wrapf(err, "failed to get image %q metadata after pulling", imageID)
} }
return &newImage, nil return &newImage, nil
} }
@ -306,28 +330,28 @@ func getImageInfo(ctx context.Context, image containerd.Image) (*imageInfo, erro
// Get image information. // Get image information.
diffIDs, err := image.RootFS(ctx) diffIDs, err := image.RootFS(ctx)
if err != nil { if err != nil {
return nil, fmt.Errorf("failed to get image diffIDs: %v", err) return nil, errors.Wrap(err, "failed to get image diffIDs")
} }
chainID := identity.ChainID(diffIDs) chainID := identity.ChainID(diffIDs)
size, err := image.Size(ctx) size, err := image.Size(ctx)
if err != nil { if err != nil {
return nil, fmt.Errorf("failed to get image compressed resource size: %v", err) return nil, errors.Wrap(err, "failed to get image compressed resource size")
} }
desc, err := image.Config(ctx) desc, err := image.Config(ctx)
if err != nil { if err != nil {
return nil, fmt.Errorf("failed to get image config descriptor: %v", err) return nil, errors.Wrap(err, "failed to get image config descriptor")
} }
id := desc.Digest.String() id := desc.Digest.String()
rb, err := content.ReadBlob(ctx, image.ContentStore(), desc.Digest) rb, err := content.ReadBlob(ctx, image.ContentStore(), desc.Digest)
if err != nil { if err != nil {
return nil, fmt.Errorf("failed to read image config from content store: %v", err) return nil, errors.Wrap(err, "failed to read image config from content store")
} }
var ociimage imagespec.Image var ociimage imagespec.Image
if err := json.Unmarshal(rb, &ociimage); err != nil { if err := json.Unmarshal(rb, &ociimage); err != nil {
return nil, fmt.Errorf("failed to unmarshal image config %s: %v", rb, err) return nil, errors.Wrapf(err, "failed to unmarshal image config %s", rb)
} }
return &imageInfo{ return &imageInfo{
@ -392,34 +416,31 @@ func newSpecGenerator(spec *runtimespec.Spec) generate.Generator {
return g return g
} }
// disableNetNSDAD disables duplicate address detection in the network namespace. func getPodCNILabels(id string, config *runtime.PodSandboxConfig) map[string]string {
// DAD has a negative affect on sandbox start latency, since we have to wait return map[string]string{
// a second or more for the addresses to leave the "tentative" state. "K8S_POD_NAMESPACE": config.GetMetadata().GetNamespace(),
func disableNetNSDAD(ns string) error { "K8S_POD_NAME": config.GetMetadata().GetName(),
dad := "net/ipv6/conf/default/accept_dad" "K8S_POD_INFRA_CONTAINER_ID": id,
"IgnoreUnknown": "1",
sysctlBin, err := exec.LookPath("sysctl")
if err != nil {
return fmt.Errorf("could not find sysctl binary: %v", err)
} }
}
nsenterBin, err := exec.LookPath("nsenter")
if err != nil { // getRuntimeConfigFromContainerInfo gets runtime configuration from containerd
return fmt.Errorf("could not find nsenter binary: %v", err) // container info.
} func getRuntimeConfigFromContainerInfo(c containers.Container) (criconfig.Runtime, error) {
r := criconfig.Runtime{
// If the sysctl doesn't exist, it means ipv6 is disabled. Type: c.Runtime.Name,
if _, err := sysctl.New().GetSysctl(dad); err != nil { }
return nil if c.Runtime.Options == nil {
} // CRI plugin makes sure that runtime option is always set.
return criconfig.Runtime{}, errors.New("runtime options is nil")
output, err := exec.Command(nsenterBin, }
fmt.Sprintf("--net=%s", ns), "-F", "--", data, err := typeurl.UnmarshalAny(c.Runtime.Options)
sysctlBin, "-w", fmt.Sprintf("%s=%s", dad, "0"), if err != nil {
).CombinedOutput() return criconfig.Runtime{}, errors.Wrap(err, "failed to unmarshal runtime options")
if err != nil { }
return fmt.Errorf("failed to write sysctl %q - output: %s, error: %s", runtimeOpts := data.(*runctypes.RuncOptions)
dad, output, err) r.Engine = runtimeOpts.Runtime
} r.Root = runtimeOpts.RuntimeRoot
return nil return r, nil
} }
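The runtime options read back here are the ones attached when the container was created. A rough round-trip sketch under the assumption that runctypes.RuncOptions resolves through typeurl's proto registry; no daemon is involved, the containers.Container value is built by hand to mirror what getRuntimeConfigFromContainerInfo consumes:

package main

import (
	"fmt"

	"github.com/containerd/containerd/containers"
	"github.com/containerd/containerd/linux/runctypes"
	"github.com/containerd/typeurl"
)

func main() {
	opts, err := typeurl.MarshalAny(&runctypes.RuncOptions{
		Runtime:     "runc",
		RuntimeRoot: "/run/runc",
	})
	if err != nil {
		panic(err)
	}
	c := containers.Container{
		Runtime: containers.RuntimeInfo{
			Name:    "io.containerd.runtime.v1.linux",
			Options: opts,
		},
	}

	// Mirror of what getRuntimeConfigFromContainerInfo does with the container info.
	data, err := typeurl.UnmarshalAny(c.Runtime.Options)
	if err != nil {
		panic(err)
	}
	ro := data.(*runctypes.RuncOptions)
	fmt.Println(c.Runtime.Name, ro.Runtime, ro.RuntimeRoot)
}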

View File

@ -26,7 +26,7 @@ import (
// ListImages lists existing images. // ListImages lists existing images.
// TODO(random-liu): Add image list filters after CRI defines this more clearly, and kubelet // TODO(random-liu): Add image list filters after CRI defines this more clearly, and kubelet
// actually needs it. // actually needs it.
func (c *criContainerdService) ListImages(ctx context.Context, r *runtime.ListImagesRequest) (*runtime.ListImagesResponse, error) { func (c *criService) ListImages(ctx context.Context, r *runtime.ListImagesRequest) (*runtime.ListImagesResponse, error) {
imagesInStore := c.imageStore.List() imagesInStore := c.imageStore.List()
var images []*runtime.Image var images []*runtime.Image

View File

@ -17,12 +17,12 @@ limitations under the License.
package server package server
import ( import (
"fmt"
"golang.org/x/net/context"
"os" "os"
"path/filepath" "path/filepath"
"github.com/pkg/errors"
"github.com/sirupsen/logrus" "github.com/sirupsen/logrus"
"golang.org/x/net/context"
api "github.com/containerd/cri/pkg/api/v1" api "github.com/containerd/cri/pkg/api/v1"
"github.com/containerd/cri/pkg/containerd/importer" "github.com/containerd/cri/pkg/containerd/importer"
@ -30,23 +30,23 @@ import (
) )
// LoadImage loads a image into containerd. // LoadImage loads a image into containerd.
func (c *criContainerdService) LoadImage(ctx context.Context, r *api.LoadImageRequest) (*api.LoadImageResponse, error) { func (c *criService) LoadImage(ctx context.Context, r *api.LoadImageRequest) (*api.LoadImageResponse, error) {
path := r.GetFilePath() path := r.GetFilePath()
if !filepath.IsAbs(path) { if !filepath.IsAbs(path) {
return nil, fmt.Errorf("path %q is not an absolute path", path) return nil, errors.Errorf("path %q is not an absolute path", path)
} }
f, err := os.Open(path) f, err := os.Open(path)
if err != nil { if err != nil {
return nil, fmt.Errorf("failed to open file: %v", err) return nil, errors.Wrap(err, "failed to open file")
} }
repoTags, err := importer.Import(ctx, c.client, f) repoTags, err := importer.Import(ctx, c.client, f)
if err != nil { if err != nil {
return nil, fmt.Errorf("failed to import image: %v", err) return nil, errors.Wrap(err, "failed to import image")
} }
for _, repoTag := range repoTags { for _, repoTag := range repoTags {
image, err := c.client.GetImage(ctx, repoTag) image, err := c.client.GetImage(ctx, repoTag)
if err != nil { if err != nil {
return nil, fmt.Errorf("failed to get image %q: %v", repoTag, err) return nil, errors.Wrapf(err, "failed to get image %q", repoTag)
} }
if err := image.Unpack(ctx, c.config.ContainerdConfig.Snapshotter); err != nil { if err := image.Unpack(ctx, c.config.ContainerdConfig.Snapshotter); err != nil {
logrus.WithError(err).Warnf("Failed to unpack image %q", repoTag) logrus.WithError(err).Warnf("Failed to unpack image %q", repoTag)
@ -54,12 +54,12 @@ func (c *criContainerdService) LoadImage(ctx context.Context, r *api.LoadImageRe
} }
info, err := getImageInfo(ctx, image) info, err := getImageInfo(ctx, image)
if err != nil { if err != nil {
return nil, fmt.Errorf("failed to get image %q info: %v", repoTag, err) return nil, errors.Wrapf(err, "failed to get image %q info", repoTag)
} }
id := info.id id := info.id
if err := c.createImageReference(ctx, id, image.Target()); err != nil { if err := c.createImageReference(ctx, id, image.Target()); err != nil {
return nil, fmt.Errorf("failed to create image reference %q: %v", id, err) return nil, errors.Wrapf(err, "failed to create image reference %q", id)
} }
img := imagestore.Image{ img := imagestore.Image{
@ -72,7 +72,7 @@ func (c *criContainerdService) LoadImage(ctx context.Context, r *api.LoadImageRe
} }
if err := c.imageStore.Add(img); err != nil { if err := c.imageStore.Add(img); err != nil {
return nil, fmt.Errorf("failed to add image %q into store: %v", id, err) return nil, errors.Wrapf(err, "failed to add image %q into store", id)
} }
logrus.Debugf("Imported image with id %q, repo tag %q", id, repoTag) logrus.Debugf("Imported image with id %q, repo tag %q", id, repoTag)
} }

View File

@ -18,7 +18,6 @@ package server
import ( import (
"encoding/base64" "encoding/base64"
"fmt"
"net/http" "net/http"
"strings" "strings"
@ -26,6 +25,7 @@ import (
"github.com/containerd/containerd/errdefs" "github.com/containerd/containerd/errdefs"
containerdimages "github.com/containerd/containerd/images" containerdimages "github.com/containerd/containerd/images"
imagespec "github.com/opencontainers/image-spec/specs-go/v1" imagespec "github.com/opencontainers/image-spec/specs-go/v1"
"github.com/pkg/errors"
"github.com/sirupsen/logrus" "github.com/sirupsen/logrus"
"golang.org/x/net/context" "golang.org/x/net/context"
runtime "k8s.io/kubernetes/pkg/kubelet/apis/cri/runtime/v1alpha2" runtime "k8s.io/kubernetes/pkg/kubelet/apis/cri/runtime/v1alpha2"
@ -77,11 +77,11 @@ import (
// contents are missing but snapshots are ready, is the image still "READY"? // contents are missing but snapshots are ready, is the image still "READY"?
// PullImage pulls an image with authentication config. // PullImage pulls an image with authentication config.
func (c *criContainerdService) PullImage(ctx context.Context, r *runtime.PullImageRequest) (*runtime.PullImageResponse, error) { func (c *criService) PullImage(ctx context.Context, r *runtime.PullImageRequest) (*runtime.PullImageResponse, error) {
imageRef := r.GetImage().GetImage() imageRef := r.GetImage().GetImage()
namedRef, err := util.NormalizeImageRef(imageRef) namedRef, err := util.NormalizeImageRef(imageRef)
if err != nil { if err != nil {
return nil, fmt.Errorf("failed to parse image reference %q: %v", imageRef, err) return nil, errors.Wrapf(err, "failed to parse image reference %q", imageRef)
} }
ref := namedRef.String() ref := namedRef.String()
if ref != imageRef { if ref != imageRef {
@ -94,7 +94,7 @@ func (c *criContainerdService) PullImage(ctx context.Context, r *runtime.PullIma
}) })
_, desc, err := resolver.Resolve(ctx, ref) _, desc, err := resolver.Resolve(ctx, ref)
if err != nil { if err != nil {
return nil, fmt.Errorf("failed to resolve image %q: %v", ref, err) return nil, errors.Wrapf(err, "failed to resolve image %q", ref)
} }
// We have to check schema1 here, because after `Pull`, schema1 // We have to check schema1 here, because after `Pull`, schema1
// image has already been converted. // image has already been converted.
@ -106,7 +106,7 @@ func (c *criContainerdService) PullImage(ctx context.Context, r *runtime.PullIma
containerd.WithResolver(resolver), containerd.WithResolver(resolver),
) )
if err != nil { if err != nil {
return nil, fmt.Errorf("failed to pull image %q: %v", ref, err) return nil, errors.Wrapf(err, "failed to pull image %q", ref)
} }
// Do best effort unpack. // Do best effort unpack.
@ -119,7 +119,7 @@ func (c *criContainerdService) PullImage(ctx context.Context, r *runtime.PullIma
// Get image information. // Get image information.
info, err := getImageInfo(ctx, image) info, err := getImageInfo(ctx, image)
if err != nil { if err != nil {
return nil, fmt.Errorf("failed to get image information: %v", err) return nil, errors.Wrap(err, "failed to get image information")
} }
imageID := info.id imageID := info.id
@ -129,7 +129,7 @@ func (c *criContainerdService) PullImage(ctx context.Context, r *runtime.PullIma
continue continue
} }
if err := c.createImageReference(ctx, r, image.Target()); err != nil { if err := c.createImageReference(ctx, r, image.Target()); err != nil {
return nil, fmt.Errorf("failed to update image reference %q: %v", r, err) return nil, errors.Wrapf(err, "failed to update image reference %q", r)
} }
} }
@ -150,7 +150,7 @@ func (c *criContainerdService) PullImage(ctx context.Context, r *runtime.PullIma
} }
if err := c.imageStore.Add(img); err != nil { if err := c.imageStore.Add(img); err != nil {
return nil, fmt.Errorf("failed to add image %q into store: %v", img.ID, err) return nil, errors.Wrapf(err, "failed to add image %q into store", img.ID)
} }
// NOTE(random-liu): the actual state in containerd is the source of truth, even though we maintain // NOTE(random-liu): the actual state in containerd is the source of truth, even though we maintain
@ -181,20 +181,20 @@ func ParseAuth(auth *runtime.AuthConfig) (string, string, error) {
} }
fields := strings.SplitN(string(decoded), ":", 2) fields := strings.SplitN(string(decoded), ":", 2)
if len(fields) != 2 { if len(fields) != 2 {
return "", "", fmt.Errorf("invalid decoded auth: %q", decoded) return "", "", errors.Errorf("invalid decoded auth: %q", decoded)
} }
user, passwd := fields[0], fields[1] user, passwd := fields[0], fields[1]
return user, strings.Trim(passwd, "\x00"), nil return user, strings.Trim(passwd, "\x00"), nil
} }
// TODO(random-liu): Support RegistryToken. // TODO(random-liu): Support RegistryToken.
return "", "", fmt.Errorf("invalid auth config") return "", "", errors.New("invalid auth config")
} }
// createImageReference creates image reference inside containerd image store. // createImageReference creates image reference inside containerd image store.
// Note that because create and update are not finished in one transaction, there could be race. E.g. // Note that because create and update are not finished in one transaction, there could be race. E.g.
// the image reference is deleted by someone else after create returns already exists, but before the update // the image reference is deleted by someone else after create returns already exists, but before the update
// happens. // happens.
func (c *criContainerdService) createImageReference(ctx context.Context, name string, desc imagespec.Descriptor) error { func (c *criService) createImageReference(ctx context.Context, name string, desc imagespec.Descriptor) error {
img := containerdimages.Image{ img := containerdimages.Image{
Name: name, Name: name,
Target: desc, Target: desc,
@ -212,7 +212,7 @@ func (c *criContainerdService) createImageReference(ctx context.Context, name st
return err return err
} }
func (c *criContainerdService) getResolverOptions() map[string][]string { func (c *criService) getResolverOptions() map[string][]string {
options := make(map[string][]string) options := make(map[string][]string)
for ns, mirror := range c.config.Mirrors { for ns, mirror := range c.config.Mirrors {
options[ns] = append(options[ns], mirror.Endpoints...) options[ns] = append(options[ns], mirror.Endpoints...)

View File

@ -17,10 +17,9 @@ limitations under the License.
package server package server
import ( import (
"fmt"
"github.com/containerd/containerd/errdefs" "github.com/containerd/containerd/errdefs"
"github.com/containerd/containerd/images" "github.com/containerd/containerd/images"
"github.com/pkg/errors"
"github.com/sirupsen/logrus" "github.com/sirupsen/logrus"
"golang.org/x/net/context" "golang.org/x/net/context"
runtime "k8s.io/kubernetes/pkg/kubelet/apis/cri/runtime/v1alpha2" runtime "k8s.io/kubernetes/pkg/kubelet/apis/cri/runtime/v1alpha2"
@ -32,10 +31,10 @@ import (
// TODO(random-liu): We should change CRI to distinguish image id and image spec. // TODO(random-liu): We should change CRI to distinguish image id and image spec.
// Remove the whole image no matter whether it's an image id or a reference. This is the // Remove the whole image no matter whether it's an image id or a reference. This is the
// semantic defined in CRI now. // semantic defined in CRI now.
func (c *criContainerdService) RemoveImage(ctx context.Context, r *runtime.RemoveImageRequest) (*runtime.RemoveImageResponse, error) { func (c *criService) RemoveImage(ctx context.Context, r *runtime.RemoveImageRequest) (*runtime.RemoveImageResponse, error) {
image, err := c.localResolve(ctx, r.GetImage().GetImage()) image, err := c.localResolve(ctx, r.GetImage().GetImage())
if err != nil { if err != nil {
return nil, fmt.Errorf("can not resolve %q locally: %v", r.GetImage().GetImage(), err) return nil, errors.Wrapf(err, "can not resolve %q locally", r.GetImage().GetImage())
} }
if image == nil { if image == nil {
// return empty without error when image not found. // return empty without error when image not found.
@ -49,7 +48,7 @@ func (c *criContainerdService) RemoveImage(ctx context.Context, r *runtime.Remov
if errdefs.IsNotFound(err) { if errdefs.IsNotFound(err) {
continue continue
} }
return nil, fmt.Errorf("failed to get image %q: %v", tag, err) return nil, errors.Wrapf(err, "failed to get image %q", tag)
} }
desc, err := cImage.Config(ctx) desc, err := cImage.Config(ctx)
if err != nil { if err != nil {
@ -82,12 +81,12 @@ func (c *criContainerdService) RemoveImage(ctx context.Context, r *runtime.Remov
if err == nil || errdefs.IsNotFound(err) { if err == nil || errdefs.IsNotFound(err) {
continue continue
} }
return nil, fmt.Errorf("failed to delete image reference %q for image %q: %v", ref, image.ID, err) return nil, errors.Wrapf(err, "failed to delete image reference %q for image %q", ref, image.ID)
} }
// Delete image id synchronously to trigger garbage collection. // Delete image id synchronously to trigger garbage collection.
err = c.client.ImageService().Delete(ctx, image.ID, images.SynchronousDelete()) err = c.client.ImageService().Delete(ctx, image.ID, images.SynchronousDelete())
if err != nil && !errdefs.IsNotFound(err) { if err != nil && !errdefs.IsNotFound(err) {
return nil, fmt.Errorf("failed to delete image id %q: %v", image.ID, err) return nil, errors.Wrapf(err, "failed to delete image id %q", image.ID)
} }
c.imageStore.Delete(image.ID) c.imageStore.Delete(image.ID)
return &runtime.RemoveImageResponse{}, nil return &runtime.RemoveImageResponse{}, nil

View File

@ -18,8 +18,8 @@ package server
import ( import (
"encoding/json" "encoding/json"
"fmt"
"github.com/pkg/errors"
"github.com/sirupsen/logrus" "github.com/sirupsen/logrus"
"golang.org/x/net/context" "golang.org/x/net/context"
runtime "k8s.io/kubernetes/pkg/kubelet/apis/cri/runtime/v1alpha2" runtime "k8s.io/kubernetes/pkg/kubelet/apis/cri/runtime/v1alpha2"
@ -31,10 +31,10 @@ import (
// ImageStatus returns the status of the image, returns nil if the image isn't present. // ImageStatus returns the status of the image, returns nil if the image isn't present.
// TODO(random-liu): We should change CRI to distinguish image id and image spec. (See // TODO(random-liu): We should change CRI to distinguish image id and image spec. (See
// kubernetes/kubernetes#46255) // kubernetes/kubernetes#46255)
func (c *criContainerdService) ImageStatus(ctx context.Context, r *runtime.ImageStatusRequest) (*runtime.ImageStatusResponse, error) { func (c *criService) ImageStatus(ctx context.Context, r *runtime.ImageStatusRequest) (*runtime.ImageStatusResponse, error) {
image, err := c.localResolve(ctx, r.GetImage().GetImage()) image, err := c.localResolve(ctx, r.GetImage().GetImage())
if err != nil { if err != nil {
return nil, fmt.Errorf("can not resolve %q locally: %v", r.GetImage().GetImage(), err) return nil, errors.Wrapf(err, "can not resolve %q locally", r.GetImage().GetImage())
} }
if image == nil { if image == nil {
// return empty without error when image not found. // return empty without error when image not found.
@ -46,7 +46,7 @@ func (c *criContainerdService) ImageStatus(ctx context.Context, r *runtime.Image
runtimeImage := toCRIRuntimeImage(image) runtimeImage := toCRIRuntimeImage(image)
info, err := c.toCRIImageInfo(ctx, image, r.GetVerbose()) info, err := c.toCRIImageInfo(ctx, image, r.GetVerbose())
if err != nil { if err != nil {
return nil, fmt.Errorf("failed to generate image info: %v", err) return nil, errors.Wrap(err, "failed to generate image info")
} }
return &runtime.ImageStatusResponse{ return &runtime.ImageStatusResponse{
@ -79,7 +79,7 @@ type verboseImageInfo struct {
} }
// toCRIImageInfo converts internal image object information to CRI image status response info map. // toCRIImageInfo converts internal image object information to CRI image status response info map.
func (c *criContainerdService) toCRIImageInfo(ctx context.Context, image *imagestore.Image, verbose bool) (map[string]string, error) { func (c *criService) toCRIImageInfo(ctx context.Context, image *imagestore.Image, verbose bool) (map[string]string, error) {
if !verbose { if !verbose {
return nil, nil return nil, nil
} }

View File

@ -25,7 +25,7 @@ import (
) )
// ImageFsInfo returns information of the filesystem that is used to store images. // ImageFsInfo returns information of the filesystem that is used to store images.
func (c *criContainerdService) ImageFsInfo(ctx context.Context, r *runtime.ImageFsInfoRequest) (*runtime.ImageFsInfoResponse, error) { func (c *criService) ImageFsInfo(ctx context.Context, r *runtime.ImageFsInfoRequest) (*runtime.ImageFsInfoResponse, error) {
snapshots := c.snapshotStore.List() snapshots := c.snapshotStore.List()
timestamp := time.Now().UnixNano() timestamp := time.Now().UnixNano()
var usedBytes, inodesUsed uint64 var usedBytes, inodesUsed uint64

View File

@ -30,10 +30,10 @@ import (
// instrumentedService wraps service with containerd namespace and logs. // instrumentedService wraps service with containerd namespace and logs.
type instrumentedService struct { type instrumentedService struct {
c *criContainerdService c *criService
} }
func newInstrumentedService(c *criContainerdService) grpcServices { func newInstrumentedService(c *criService) grpcServices {
return &instrumentedService{c: c} return &instrumentedService{c: c}
} }

View File

@ -19,12 +19,12 @@ package io
import ( import (
"bufio" "bufio"
"bytes" "bytes"
"fmt"
"io" "io"
"io/ioutil" "io/ioutil"
"os" "os"
"time" "time"
"github.com/pkg/errors"
"github.com/sirupsen/logrus" "github.com/sirupsen/logrus"
runtime "k8s.io/kubernetes/pkg/kubelet/apis/cri/runtime/v1alpha2" runtime "k8s.io/kubernetes/pkg/kubelet/apis/cri/runtime/v1alpha2"
@ -57,7 +57,7 @@ func NewCRILogger(path string, stream StreamType) (io.WriteCloser, error) {
prc, pwc := io.Pipe() prc, pwc := io.Pipe()
f, err := os.OpenFile(path, os.O_CREATE|os.O_APPEND|os.O_WRONLY, 0640) f, err := os.OpenFile(path, os.O_CREATE|os.O_APPEND|os.O_WRONLY, 0640)
if err != nil { if err != nil {
return nil, fmt.Errorf("failed to open log file: %v", err) return nil, errors.Wrap(err, "failed to open log file")
} }
go redirectLogs(path, prc, f, stream) go redirectLogs(path, prc, f, stream)
return pwc, nil return pwc, nil

View File

@ -17,7 +17,6 @@ limitations under the License.
package server package server
import ( import (
"fmt"
"io/ioutil" "io/ioutil"
"os" "os"
"path/filepath" "path/filepath"
@ -31,6 +30,7 @@ import (
"github.com/containerd/typeurl" "github.com/containerd/typeurl"
"github.com/docker/distribution/reference" "github.com/docker/distribution/reference"
"github.com/docker/docker/pkg/system" "github.com/docker/docker/pkg/system"
"github.com/pkg/errors"
"github.com/sirupsen/logrus" "github.com/sirupsen/logrus"
"golang.org/x/net/context" "golang.org/x/net/context"
runtime "k8s.io/kubernetes/pkg/kubelet/apis/cri/runtime/v1alpha2" runtime "k8s.io/kubernetes/pkg/kubelet/apis/cri/runtime/v1alpha2"
@ -51,11 +51,11 @@ import (
// tolerant tasks being created or started, we prefer that not to happen. // tolerant tasks being created or started, we prefer that not to happen.
// recover recovers system state from containerd and status checkpoint. // recover recovers system state from containerd and status checkpoint.
func (c *criContainerdService) recover(ctx context.Context) error { func (c *criService) recover(ctx context.Context) error {
// Recover all sandboxes. // Recover all sandboxes.
sandboxes, err := c.client.Containers(ctx, filterLabel(containerKindLabel, containerKindSandbox)) sandboxes, err := c.client.Containers(ctx, filterLabel(containerKindLabel, containerKindSandbox))
if err != nil { if err != nil {
return fmt.Errorf("failed to list sandbox containers: %v", err) return errors.Wrap(err, "failed to list sandbox containers")
} }
for _, sandbox := range sandboxes { for _, sandbox := range sandboxes {
sb, err := loadSandbox(ctx, sandbox) sb, err := loadSandbox(ctx, sandbox)
@ -65,47 +65,48 @@ func (c *criContainerdService) recover(ctx context.Context) error {
} }
logrus.Debugf("Loaded sandbox %+v", sb) logrus.Debugf("Loaded sandbox %+v", sb)
if err := c.sandboxStore.Add(sb); err != nil { if err := c.sandboxStore.Add(sb); err != nil {
return fmt.Errorf("failed to add sandbox %q to store: %v", sandbox.ID(), err) return errors.Wrapf(err, "failed to add sandbox %q to store", sandbox.ID())
} }
if err := c.sandboxNameIndex.Reserve(sb.Name, sb.ID); err != nil { if err := c.sandboxNameIndex.Reserve(sb.Name, sb.ID); err != nil {
return fmt.Errorf("failed to reserve sandbox name %q: %v", sb.Name, err) return errors.Wrapf(err, "failed to reserve sandbox name %q", sb.Name)
} }
} }
// Recover all containers. // Recover all containers.
containers, err := c.client.Containers(ctx, filterLabel(containerKindLabel, containerKindContainer)) containers, err := c.client.Containers(ctx, filterLabel(containerKindLabel, containerKindContainer))
if err != nil { if err != nil {
return fmt.Errorf("failed to list containers: %v", err) return errors.Wrap(err, "failed to list containers")
} }
for _, container := range containers { for _, container := range containers {
containerDir := getContainerRootDir(c.config.RootDir, container.ID()) containerDir := c.getContainerRootDir(container.ID())
cntr, err := loadContainer(ctx, container, containerDir) volatileContainerDir := c.getVolatileContainerRootDir(container.ID())
cntr, err := loadContainer(ctx, container, containerDir, volatileContainerDir)
if err != nil { if err != nil {
logrus.WithError(err).Errorf("Failed to load container %q", container.ID()) logrus.WithError(err).Errorf("Failed to load container %q", container.ID())
continue continue
} }
logrus.Debugf("Loaded container %+v", cntr) logrus.Debugf("Loaded container %+v", cntr)
if err := c.containerStore.Add(cntr); err != nil { if err := c.containerStore.Add(cntr); err != nil {
return fmt.Errorf("failed to add container %q to store: %v", container.ID(), err) return errors.Wrapf(err, "failed to add container %q to store", container.ID())
} }
if err := c.containerNameIndex.Reserve(cntr.Name, cntr.ID); err != nil { if err := c.containerNameIndex.Reserve(cntr.Name, cntr.ID); err != nil {
return fmt.Errorf("failed to reserve container name %q: %v", cntr.Name, err) return errors.Wrapf(err, "failed to reserve container name %q", cntr.Name)
} }
} }
// Recover all images. // Recover all images.
cImages, err := c.client.ListImages(ctx) cImages, err := c.client.ListImages(ctx)
if err != nil { if err != nil {
return fmt.Errorf("failed to list images: %v", err) return errors.Wrap(err, "failed to list images")
} }
images, err := loadImages(ctx, cImages, c.config.ContainerdConfig.Snapshotter) images, err := loadImages(ctx, cImages, c.config.ContainerdConfig.Snapshotter)
if err != nil { if err != nil {
return fmt.Errorf("failed to load images: %v", err) return errors.Wrap(err, "failed to load images")
} }
for _, image := range images { for _, image := range images {
logrus.Debugf("Loaded image %+v", image) logrus.Debugf("Loaded image %+v", image)
if err := c.imageStore.Add(image); err != nil { if err := c.imageStore.Add(image); err != nil {
return fmt.Errorf("failed to add image %q to store: %v", image.ID, err) return errors.Wrapf(err, "failed to add image %q to store", image.ID)
} }
} }
@ -113,35 +114,56 @@ func (c *criContainerdService) recover(ctx context.Context) error {
// we can't even get metadata, we should cleanup orphaned sandbox/container directories // we can't even get metadata, we should cleanup orphaned sandbox/container directories
// with best effort. // with best effort.
// Cleanup orphaned sandbox directories without corresponding containerd container. // Cleanup orphaned sandbox and container directories without corresponding containerd container.
if err := cleanupOrphanedSandboxDirs(sandboxes, filepath.Join(c.config.RootDir, "sandboxes")); err != nil { for _, cleanup := range []struct {
return fmt.Errorf("failed to cleanup orphaned sandbox directories: %v", err) cntrs []containerd.Container
base string
errMsg string
}{
{
cntrs: sandboxes,
base: filepath.Join(c.config.RootDir, sandboxesDir),
errMsg: "failed to cleanup orphaned sandbox directories",
},
{
cntrs: sandboxes,
base: filepath.Join(c.config.StateDir, sandboxesDir),
errMsg: "failed to cleanup orphaned volatile sandbox directories",
},
{
cntrs: containers,
base: filepath.Join(c.config.RootDir, containersDir),
errMsg: "failed to cleanup orphaned container directories",
},
{
cntrs: containers,
base: filepath.Join(c.config.StateDir, containersDir),
errMsg: "failed to cleanup orphaned volatile container directories",
},
} {
if err := cleanupOrphanedIDDirs(cleanup.cntrs, cleanup.base); err != nil {
return errors.Wrap(err, cleanup.errMsg)
} }
// Cleanup orphaned container directories without corresponding containerd container.
if err := cleanupOrphanedContainerDirs(containers, filepath.Join(c.config.RootDir, "containers")); err != nil {
return fmt.Errorf("failed to cleanup orphaned container directories: %v", err)
} }
return nil return nil
} }
// loadContainer loads container from containerd and status checkpoint. // loadContainer loads container from containerd and status checkpoint.
func loadContainer(ctx context.Context, cntr containerd.Container, containerDir string) (containerstore.Container, error) { func loadContainer(ctx context.Context, cntr containerd.Container, containerDir, volatileContainerDir string) (containerstore.Container, error) {
id := cntr.ID() id := cntr.ID()
var container containerstore.Container var container containerstore.Container
// Load container metadata. // Load container metadata.
exts, err := cntr.Extensions(ctx) exts, err := cntr.Extensions(ctx)
if err != nil { if err != nil {
return container, fmt.Errorf("failed to get container extensions: %v", err) return container, errors.Wrap(err, "failed to get container extensions")
} }
ext, ok := exts[containerMetadataExtension] ext, ok := exts[containerMetadataExtension]
if !ok { if !ok {
return container, fmt.Errorf("metadata extension %q not found", containerMetadataExtension) return container, errors.Errorf("metadata extension %q not found", containerMetadataExtension)
} }
data, err := typeurl.UnmarshalAny(&ext) data, err := typeurl.UnmarshalAny(&ext)
if err != nil { if err != nil {
return container, fmt.Errorf("failed to unmarshal metadata extension %q: %v", ext, err) return container, errors.Wrapf(err, "failed to unmarshal metadata extension %q", ext)
} }
meta := data.(*containerstore.Metadata) meta := data.(*containerstore.Metadata)
@ -170,7 +192,7 @@ func loadContainer(ctx context.Context, cntr containerd.Container, containerDir
return containerIO, nil return containerIO, nil
}) })
if err != nil && !errdefs.IsNotFound(err) { if err != nil && !errdefs.IsNotFound(err) {
return container, fmt.Errorf("failed to load task: %v", err) return container, errors.Wrap(err, "failed to load task")
} }
var s containerd.Status var s containerd.Status
var notFound bool var notFound bool
@ -183,7 +205,7 @@ func loadContainer(ctx context.Context, cntr containerd.Container, containerDir
if err != nil { if err != nil {
// It's still possible that task is deleted during this window. // It's still possible that task is deleted during this window.
if !errdefs.IsNotFound(err) { if !errdefs.IsNotFound(err) {
return container, fmt.Errorf("failed to get task status: %v", err) return container, errors.Wrap(err, "failed to get task status")
} }
notFound = true notFound = true
} }
@ -197,10 +219,10 @@ func loadContainer(ctx context.Context, cntr containerd.Container, containerDir
// containerd got restarted during that. In that case, we still // containerd got restarted during that. In that case, we still
// treat the container as `CREATED`. // treat the container as `CREATED`.
containerIO, err = cio.NewContainerIO(id, containerIO, err = cio.NewContainerIO(id,
cio.WithNewFIFOs(containerDir, meta.Config.GetTty(), meta.Config.GetStdin()), cio.WithNewFIFOs(volatileContainerDir, meta.Config.GetTty(), meta.Config.GetStdin()),
) )
if err != nil { if err != nil {
return container, fmt.Errorf("failed to create container io: %v", err) return container, errors.Wrap(err, "failed to create container io")
} }
case runtime.ContainerState_CONTAINER_RUNNING: case runtime.ContainerState_CONTAINER_RUNNING:
// Container was in running state, but its task has been deleted, // Container was in running state, but its task has been deleted,
@ -219,17 +241,17 @@ func loadContainer(ctx context.Context, cntr containerd.Container, containerDir
// gets restarted during container start. // gets restarted during container start.
// Container must be in `CREATED` state. // Container must be in `CREATED` state.
if _, err := t.Delete(ctx, containerd.WithProcessKill); err != nil && !errdefs.IsNotFound(err) { if _, err := t.Delete(ctx, containerd.WithProcessKill); err != nil && !errdefs.IsNotFound(err) {
return container, fmt.Errorf("failed to delete task: %v", err) return container, errors.Wrap(err, "failed to delete task")
} }
if status.State() != runtime.ContainerState_CONTAINER_CREATED { if status.State() != runtime.ContainerState_CONTAINER_CREATED {
return container, fmt.Errorf("unexpected container state for created task: %q", status.State()) return container, errors.Errorf("unexpected container state for created task: %q", status.State())
} }
case containerd.Running: case containerd.Running:
// Task is running. Container must be in `RUNNING` state, based on our assuption that // Task is running. Container must be in `RUNNING` state, based on our assuption that
// "task should not be started when containerd is down". // "task should not be started when containerd is down".
switch status.State() { switch status.State() {
case runtime.ContainerState_CONTAINER_EXITED: case runtime.ContainerState_CONTAINER_EXITED:
return container, fmt.Errorf("unexpected container state for running task: %q", status.State()) return container, errors.Errorf("unexpected container state for running task: %q", status.State())
case runtime.ContainerState_CONTAINER_RUNNING: case runtime.ContainerState_CONTAINER_RUNNING:
default: default:
// This may happen if containerd gets restarted after task is started, but // This may happen if containerd gets restarted after task is started, but
@ -240,12 +262,12 @@ func loadContainer(ctx context.Context, cntr containerd.Container, containerDir
case containerd.Stopped: case containerd.Stopped:
// Task is stopped. Updata status and delete the task. // Task is stopped. Updata status and delete the task.
if _, err := t.Delete(ctx, containerd.WithProcessKill); err != nil && !errdefs.IsNotFound(err) { if _, err := t.Delete(ctx, containerd.WithProcessKill); err != nil && !errdefs.IsNotFound(err) {
return container, fmt.Errorf("failed to delete task: %v", err) return container, errors.Wrap(err, "failed to delete task")
} }
status.FinishedAt = s.ExitTime.UnixNano() status.FinishedAt = s.ExitTime.UnixNano()
status.ExitCode = int32(s.ExitStatus) status.ExitCode = int32(s.ExitStatus)
default: default:
return container, fmt.Errorf("unexpected task status %q", s.Status) return container, errors.Errorf("unexpected task status %q", s.Status)
} }
} }
opts := []containerstore.Opts{ opts := []containerstore.Opts{
@ -282,29 +304,29 @@ func loadSandbox(ctx context.Context, cntr containerd.Container) (sandboxstore.S
// Load sandbox metadata. // Load sandbox metadata.
exts, err := cntr.Extensions(ctx) exts, err := cntr.Extensions(ctx)
if err != nil { if err != nil {
return sandbox, fmt.Errorf("failed to get sandbox container extensions: %v", err) return sandbox, errors.Wrap(err, "failed to get sandbox container extensions")
} }
ext, ok := exts[sandboxMetadataExtension] ext, ok := exts[sandboxMetadataExtension]
if !ok { if !ok {
return sandbox, fmt.Errorf("metadata extension %q not found", sandboxMetadataExtension) return sandbox, errors.Errorf("metadata extension %q not found", sandboxMetadataExtension)
} }
data, err := typeurl.UnmarshalAny(&ext) data, err := typeurl.UnmarshalAny(&ext)
if err != nil { if err != nil {
return sandbox, fmt.Errorf("failed to unmarshal metadata extension %q: %v", ext, err) return sandbox, errors.Wrapf(err, "failed to unmarshal metadata extension %q", ext)
} }
meta := data.(*sandboxstore.Metadata) meta := data.(*sandboxstore.Metadata)
// Load sandbox created timestamp. // Load sandbox created timestamp.
info, err := cntr.Info(ctx) info, err := cntr.Info(ctx)
if err != nil { if err != nil {
return sandbox, fmt.Errorf("failed to get sandbox container info: %v", err) return sandbox, errors.Wrap(err, "failed to get sandbox container info")
} }
createdAt := info.CreatedAt createdAt := info.CreatedAt
// Load sandbox status. // Load sandbox status.
t, err := cntr.Task(ctx, nil) t, err := cntr.Task(ctx, nil)
if err != nil && !errdefs.IsNotFound(err) { if err != nil && !errdefs.IsNotFound(err) {
return sandbox, fmt.Errorf("failed to load task: %v", err) return sandbox, errors.Wrap(err, "failed to load task")
} }
var s containerd.Status var s containerd.Status
var notFound bool var notFound bool
@ -317,7 +339,7 @@ func loadSandbox(ctx context.Context, cntr containerd.Container) (sandboxstore.S
if err != nil { if err != nil {
// It's still possible that task is deleted during this window. // It's still possible that task is deleted during this window.
if !errdefs.IsNotFound(err) { if !errdefs.IsNotFound(err) {
return sandbox, fmt.Errorf("failed to get task status: %v", err) return sandbox, errors.Wrap(err, "failed to get task status")
} }
notFound = true notFound = true
} }
@ -335,7 +357,7 @@ func loadSandbox(ctx context.Context, cntr containerd.Container) (sandboxstore.S
} else { } else {
// Task is not running. Delete the task and set sandbox state as NOTREADY. // Task is not running. Delete the task and set sandbox state as NOTREADY.
if _, err := t.Delete(ctx, containerd.WithProcessKill); err != nil && !errdefs.IsNotFound(err) { if _, err := t.Delete(ctx, containerd.WithProcessKill); err != nil && !errdefs.IsNotFound(err) {
return sandbox, fmt.Errorf("failed to delete task: %v", err) return sandbox, errors.Wrap(err, "failed to delete task")
} }
state = sandboxstore.StateNotReady state = sandboxstore.StateNotReady
} }
@ -359,7 +381,7 @@ func loadSandbox(ctx context.Context, cntr containerd.Container) (sandboxstore.S
netNS, err := sandboxstore.LoadNetNS(meta.NetNSPath) netNS, err := sandboxstore.LoadNetNS(meta.NetNSPath)
if err != nil { if err != nil {
if err != sandboxstore.ErrClosedNetNS { if err != sandboxstore.ErrClosedNetNS {
return sandbox, fmt.Errorf("failed to load netns %q: %v", meta.NetNSPath, err) return sandbox, errors.Wrapf(err, "failed to load netns %q", meta.NetNSPath)
} }
netNS = nil netNS = nil
} }
@ -448,59 +470,30 @@ func loadImages(ctx context.Context, cImages []containerd.Image,
return images, nil return images, nil
} }
func cleanupOrphanedSandboxDirs(cntrs []containerd.Container, sandboxesRoot string) error { func cleanupOrphanedIDDirs(cntrs []containerd.Container, base string) error {
// Cleanup orphaned sandbox directories. // Cleanup orphaned id directories.
dirs, err := ioutil.ReadDir(sandboxesRoot) dirs, err := ioutil.ReadDir(base)
if err != nil && !os.IsNotExist(err) { if err != nil && !os.IsNotExist(err) {
return fmt.Errorf("failed to read pod sandboxes directory %q: %v", sandboxesRoot, err) return errors.Wrap(err, "failed to read base directory")
} }
cntrsMap := make(map[string]containerd.Container) idsMap := make(map[string]containerd.Container)
for _, cntr := range cntrs { for _, cntr := range cntrs {
cntrsMap[cntr.ID()] = cntr idsMap[cntr.ID()] = cntr
} }
for _, d := range dirs { for _, d := range dirs {
if !d.IsDir() { if !d.IsDir() {
logrus.Warnf("Invalid file %q found in pod sandboxes directory", d.Name()) logrus.Warnf("Invalid file %q found in base directory %q", d.Name(), base)
continue continue
} }
if _, ok := cntrsMap[d.Name()]; ok { if _, ok := idsMap[d.Name()]; ok {
// Do not remove sandbox directory if corresponding container is found. // Do not remove id directory if corresponding container is found.
continue continue
} }
sandboxDir := filepath.Join(sandboxesRoot, d.Name()) dir := filepath.Join(base, d.Name())
if err := system.EnsureRemoveAll(sandboxDir); err != nil { if err := system.EnsureRemoveAll(dir); err != nil {
logrus.WithError(err).Warnf("Failed to remove pod sandbox directory %q", sandboxDir) logrus.WithError(err).Warnf("Failed to remove id directory %q", dir)
} else { } else {
logrus.Debugf("Cleanup orphaned pod sandbox directory %q", sandboxDir) logrus.Debugf("Cleanup orphaned id directory %q", dir)
}
}
return nil
}
func cleanupOrphanedContainerDirs(cntrs []containerd.Container, containersRoot string) error {
// Cleanup orphaned container directories.
dirs, err := ioutil.ReadDir(containersRoot)
if err != nil && !os.IsNotExist(err) {
return fmt.Errorf("failed to read containers directory %q: %v", containersRoot, err)
}
cntrsMap := make(map[string]containerd.Container)
for _, cntr := range cntrs {
cntrsMap[cntr.ID()] = cntr
}
for _, d := range dirs {
if !d.IsDir() {
logrus.Warnf("Invalid file %q found in containers directory", d.Name())
continue
}
if _, ok := cntrsMap[d.Name()]; ok {
// Do not remove container directory if corresponding container is found.
continue
}
containerDir := filepath.Join(containersRoot, d.Name())
if err := system.EnsureRemoveAll(containerDir); err != nil {
logrus.WithError(err).Warnf("Failed to remove container directory %q", containerDir)
} else {
logrus.Debugf("Cleanup orphaned container directory %q", containerDir)
} }
} }
return nil return nil

View File

@ -24,7 +24,7 @@ import (
) )
// ListPodSandbox returns a list of Sandbox. // ListPodSandbox returns a list of Sandbox.
func (c *criContainerdService) ListPodSandbox(ctx context.Context, r *runtime.ListPodSandboxRequest) (*runtime.ListPodSandboxResponse, error) { func (c *criService) ListPodSandbox(ctx context.Context, r *runtime.ListPodSandboxRequest) (*runtime.ListPodSandboxResponse, error) {
// List all sandboxes from store. // List all sandboxes from store.
sandboxesInStore := c.sandboxStore.List() sandboxesInStore := c.sandboxStore.List()
var sandboxes []*runtime.PodSandbox var sandboxes []*runtime.PodSandbox
@ -56,14 +56,14 @@ func toCRISandbox(meta sandboxstore.Metadata, status sandboxstore.Status) *runti
} }
} }
func (c *criContainerdService) normalizePodSandboxFilter(filter *runtime.PodSandboxFilter) { func (c *criService) normalizePodSandboxFilter(filter *runtime.PodSandboxFilter) {
if sb, err := c.sandboxStore.Get(filter.GetId()); err == nil { if sb, err := c.sandboxStore.Get(filter.GetId()); err == nil {
filter.Id = sb.ID filter.Id = sb.ID
} }
} }
// filterCRISandboxes filters CRISandboxes. // filterCRISandboxes filters CRISandboxes.
func (c *criContainerdService) filterCRISandboxes(sandboxes []*runtime.PodSandbox, filter *runtime.PodSandboxFilter) []*runtime.PodSandbox { func (c *criService) filterCRISandboxes(sandboxes []*runtime.PodSandbox, filter *runtime.PodSandboxFilter) []*runtime.PodSandbox {
if filter == nil { if filter == nil {
return sandboxes return sandboxes
} }

View File

@ -18,12 +18,12 @@ package server
import ( import (
"bytes" "bytes"
"errors"
"fmt" "fmt"
"io" "io"
"os/exec" "os/exec"
"strings" "strings"
"github.com/pkg/errors"
"github.com/sirupsen/logrus" "github.com/sirupsen/logrus"
"golang.org/x/net/context" "golang.org/x/net/context"
runtime "k8s.io/kubernetes/pkg/kubelet/apis/cri/runtime/v1alpha2" runtime "k8s.io/kubernetes/pkg/kubelet/apis/cri/runtime/v1alpha2"
@ -33,11 +33,11 @@ import (
) )
// PortForward prepares a streaming endpoint to forward ports from a PodSandbox, and returns the address. // PortForward prepares a streaming endpoint to forward ports from a PodSandbox, and returns the address.
func (c *criContainerdService) PortForward(ctx context.Context, r *runtime.PortForwardRequest) (retRes *runtime.PortForwardResponse, retErr error) { func (c *criService) PortForward(ctx context.Context, r *runtime.PortForwardRequest) (retRes *runtime.PortForwardResponse, retErr error) {
// TODO(random-liu): Run a socat container inside the sandbox to do portforward. // TODO(random-liu): Run a socat container inside the sandbox to do portforward.
sandbox, err := c.sandboxStore.Get(r.GetPodSandboxId()) sandbox, err := c.sandboxStore.Get(r.GetPodSandboxId())
if err != nil { if err != nil {
return nil, fmt.Errorf("failed to find sandbox %q: %v", r.GetPodSandboxId(), err) return nil, errors.Wrapf(err, "failed to find sandbox %q", r.GetPodSandboxId())
} }
if sandbox.Status.Get().State != sandboxstore.StateReady { if sandbox.Status.Get().State != sandboxstore.StateReady {
return nil, errors.New("sandbox container is not running") return nil, errors.New("sandbox container is not running")
@ -49,20 +49,20 @@ func (c *criContainerdService) PortForward(ctx context.Context, r *runtime.PortF
// portForward requires `nsenter` and `socat` on the node, it uses `nsenter` to enter the // portForward requires `nsenter` and `socat` on the node, it uses `nsenter` to enter the
// sandbox namespace, and run `socat` inside the namespace to forward stream for a specific // sandbox namespace, and run `socat` inside the namespace to forward stream for a specific
// port. The `socat` command keeps running until it exits or client disconnect. // port. The `socat` command keeps running until it exits or client disconnect.
func (c *criContainerdService) portForward(id string, port int32, stream io.ReadWriteCloser) error { func (c *criService) portForward(id string, port int32, stream io.ReadWriteCloser) error {
s, err := c.sandboxStore.Get(id) s, err := c.sandboxStore.Get(id)
if err != nil { if err != nil {
return fmt.Errorf("failed to find sandbox %q in store: %v", id, err) return errors.Wrapf(err, "failed to find sandbox %q in store", id)
} }
t, err := s.Container.Task(ctrdutil.NamespacedContext(), nil) t, err := s.Container.Task(ctrdutil.NamespacedContext(), nil)
if err != nil { if err != nil {
return fmt.Errorf("failed to get sandbox container task: %v", err) return errors.Wrap(err, "failed to get sandbox container task")
} }
pid := t.Pid() pid := t.Pid()
socat, err := exec.LookPath("socat") socat, err := exec.LookPath("socat")
if err != nil { if err != nil {
return fmt.Errorf("failed to find socat: %v", err) return errors.Wrap(err, "failed to find socat")
} }
// Check following links for meaning of the options: // Check following links for meaning of the options:
@ -73,7 +73,7 @@ func (c *criContainerdService) portForward(id string, port int32, stream io.Read
nsenter, err := exec.LookPath("nsenter") nsenter, err := exec.LookPath("nsenter")
if err != nil { if err != nil {
return fmt.Errorf("failed to find nsenter: %v", err) return errors.Wrap(err, "failed to find nsenter")
} }
logrus.Infof("Executing port forwarding command: %s %s", nsenter, strings.Join(args, " ")) logrus.Infof("Executing port forwarding command: %s %s", nsenter, strings.Join(args, " "))
@ -95,7 +95,7 @@ func (c *criContainerdService) portForward(id string, port int32, stream io.Read
// when the command (socat) exits. // when the command (socat) exits.
in, err := cmd.StdinPipe() in, err := cmd.StdinPipe()
if err != nil { if err != nil {
return fmt.Errorf("failed to create stdin pipe: %v", err) return errors.Wrap(err, "failed to create stdin pipe")
} }
go func() { go func() {
if _, err := io.Copy(in, stream); err != nil { if _, err := io.Copy(in, stream); err != nil {
@ -106,7 +106,7 @@ func (c *criContainerdService) portForward(id string, port int32, stream io.Read
}() }()
if err := cmd.Run(); err != nil { if err := cmd.Run(); err != nil {
return fmt.Errorf("nsenter command returns error: %v, stderr: %q", err, stderr.String()) return errors.Errorf("nsenter command returns error: %v, stderr: %q", err, stderr.String())
} }
logrus.Infof("Finish port forwarding for %q port %d", id, port) logrus.Infof("Finish port forwarding for %q port %d", id, port)

View File

@ -17,11 +17,10 @@ limitations under the License.
package server package server
import ( import (
"fmt"
"github.com/containerd/containerd" "github.com/containerd/containerd"
"github.com/containerd/containerd/errdefs" "github.com/containerd/containerd/errdefs"
"github.com/docker/docker/pkg/system" "github.com/docker/docker/pkg/system"
"github.com/pkg/errors"
"golang.org/x/net/context" "golang.org/x/net/context"
runtime "k8s.io/kubernetes/pkg/kubelet/apis/cri/runtime/v1alpha2" runtime "k8s.io/kubernetes/pkg/kubelet/apis/cri/runtime/v1alpha2"
@ -32,12 +31,12 @@ import (
// RemovePodSandbox removes the sandbox. If there are running containers in the // RemovePodSandbox removes the sandbox. If there are running containers in the
// sandbox, they should be forcibly removed. // sandbox, they should be forcibly removed.
func (c *criContainerdService) RemovePodSandbox(ctx context.Context, r *runtime.RemovePodSandboxRequest) (*runtime.RemovePodSandboxResponse, error) { func (c *criService) RemovePodSandbox(ctx context.Context, r *runtime.RemovePodSandboxRequest) (*runtime.RemovePodSandboxResponse, error) {
sandbox, err := c.sandboxStore.Get(r.GetPodSandboxId()) sandbox, err := c.sandboxStore.Get(r.GetPodSandboxId())
if err != nil { if err != nil {
if err != store.ErrNotExist { if err != store.ErrNotExist {
return nil, fmt.Errorf("an error occurred when try to find sandbox %q: %v", return nil, errors.Wrapf(err, "an error occurred when try to find sandbox %q",
r.GetPodSandboxId(), err) r.GetPodSandboxId())
} }
// Do not return error if the id doesn't exist. // Do not return error if the id doesn't exist.
log.Tracef("RemovePodSandbox called for sandbox %q that does not exist", log.Tracef("RemovePodSandbox called for sandbox %q that does not exist",
@ -49,12 +48,12 @@ func (c *criContainerdService) RemovePodSandbox(ctx context.Context, r *runtime.
// Return error if sandbox container is still running. // Return error if sandbox container is still running.
if sandbox.Status.Get().State == sandboxstore.StateReady { if sandbox.Status.Get().State == sandboxstore.StateReady {
return nil, fmt.Errorf("sandbox container %q is not fully stopped", id) return nil, errors.Errorf("sandbox container %q is not fully stopped", id)
} }
// Return error if sandbox network namespace is not closed yet. // Return error if sandbox network namespace is not closed yet.
if sandbox.NetNS != nil && !sandbox.NetNS.Closed() { if sandbox.NetNS != nil && !sandbox.NetNS.Closed() {
return nil, fmt.Errorf("sandbox network namespace %q is not fully closed", sandbox.NetNS.GetPath()) return nil, errors.Errorf("sandbox network namespace %q is not fully closed", sandbox.NetNS.GetPath())
} }
// Remove all containers inside the sandbox. // Remove all containers inside the sandbox.
@ -69,21 +68,26 @@ func (c *criContainerdService) RemovePodSandbox(ctx context.Context, r *runtime.
} }
_, err = c.RemoveContainer(ctx, &runtime.RemoveContainerRequest{ContainerId: cntr.ID}) _, err = c.RemoveContainer(ctx, &runtime.RemoveContainerRequest{ContainerId: cntr.ID})
if err != nil { if err != nil {
return nil, fmt.Errorf("failed to remove container %q: %v", cntr.ID, err) return nil, errors.Wrapf(err, "failed to remove container %q", cntr.ID)
} }
} }
// Cleanup the sandbox root directory. // Cleanup the sandbox root directories.
sandboxRootDir := getSandboxRootDir(c.config.RootDir, id) sandboxRootDir := c.getSandboxRootDir(id)
if err := system.EnsureRemoveAll(sandboxRootDir); err != nil { if err := system.EnsureRemoveAll(sandboxRootDir); err != nil {
return nil, fmt.Errorf("failed to remove sandbox root directory %q: %v", return nil, errors.Wrapf(err, "failed to remove sandbox root directory %q",
sandboxRootDir, err) sandboxRootDir)
}
volatileSandboxRootDir := c.getVolatileSandboxRootDir(id)
if err := system.EnsureRemoveAll(volatileSandboxRootDir); err != nil {
return nil, errors.Wrapf(err, "failed to remove volatile sandbox root directory %q",
volatileSandboxRootDir)
} }
// Delete sandbox container. // Delete sandbox container.
if err := sandbox.Container.Delete(ctx, containerd.WithSnapshotCleanup); err != nil { if err := sandbox.Container.Delete(ctx, containerd.WithSnapshotCleanup); err != nil {
if !errdefs.IsNotFound(err) { if !errdefs.IsNotFound(err) {
return nil, fmt.Errorf("failed to delete sandbox container %q: %v", id, err) return nil, errors.Wrapf(err, "failed to delete sandbox container %q", id)
} }
log.Tracef("Remove called for sandbox container %q that does not exist", id) log.Tracef("Remove called for sandbox container %q that does not exist", id)
} }

View File

@ -26,16 +26,18 @@ import (
"github.com/containerd/containerd/errdefs" "github.com/containerd/containerd/errdefs"
"github.com/containerd/containerd/linux/runctypes" "github.com/containerd/containerd/linux/runctypes"
"github.com/containerd/containerd/oci" "github.com/containerd/containerd/oci"
cni "github.com/containerd/go-cni"
"github.com/containerd/typeurl" "github.com/containerd/typeurl"
"github.com/cri-o/ocicni/pkg/ocicni"
imagespec "github.com/opencontainers/image-spec/specs-go/v1" imagespec "github.com/opencontainers/image-spec/specs-go/v1"
runtimespec "github.com/opencontainers/runtime-spec/specs-go" runtimespec "github.com/opencontainers/runtime-spec/specs-go"
"github.com/pkg/errors"
"github.com/sirupsen/logrus" "github.com/sirupsen/logrus"
"golang.org/x/net/context" "golang.org/x/net/context"
"golang.org/x/sys/unix" "golang.org/x/sys/unix"
runtime "k8s.io/kubernetes/pkg/kubelet/apis/cri/runtime/v1alpha2" runtime "k8s.io/kubernetes/pkg/kubelet/apis/cri/runtime/v1alpha2"
"github.com/containerd/cri/pkg/annotations" "github.com/containerd/cri/pkg/annotations"
criconfig "github.com/containerd/cri/pkg/config"
customopts "github.com/containerd/cri/pkg/containerd/opts" customopts "github.com/containerd/cri/pkg/containerd/opts"
ctrdutil "github.com/containerd/cri/pkg/containerd/util" ctrdutil "github.com/containerd/cri/pkg/containerd/util"
"github.com/containerd/cri/pkg/log" "github.com/containerd/cri/pkg/log"
@ -50,7 +52,7 @@ func init() {
// RunPodSandbox creates and starts a pod-level sandbox. Runtimes should ensure // RunPodSandbox creates and starts a pod-level sandbox. Runtimes should ensure
// the sandbox is in ready state. // the sandbox is in ready state.
func (c *criContainerdService) RunPodSandbox(ctx context.Context, r *runtime.RunPodSandboxRequest) (_ *runtime.RunPodSandboxResponse, retErr error) { func (c *criService) RunPodSandbox(ctx context.Context, r *runtime.RunPodSandboxRequest) (_ *runtime.RunPodSandboxResponse, retErr error) {
config := r.GetConfig() config := r.GetConfig()
// Generate unique id and name for the sandbox and reserve the name. // Generate unique id and name for the sandbox and reserve the name.
@ -60,7 +62,7 @@ func (c *criContainerdService) RunPodSandbox(ctx context.Context, r *runtime.Run
// Reserve the sandbox name to avoid concurrent `RunPodSandbox` request starting the // Reserve the sandbox name to avoid concurrent `RunPodSandbox` request starting the
// same sandbox. // same sandbox.
if err := c.sandboxNameIndex.Reserve(name, id); err != nil { if err := c.sandboxNameIndex.Reserve(name, id); err != nil {
return nil, fmt.Errorf("failed to reserve sandbox name %q: %v", name, err) return nil, errors.Wrapf(err, "failed to reserve sandbox name %q", name)
} }
defer func() { defer func() {
// Release the name if the function returns with an error. // Release the name if the function returns with an error.
@ -84,7 +86,7 @@ func (c *criContainerdService) RunPodSandbox(ctx context.Context, r *runtime.Run
// Ensure sandbox container image snapshot. // Ensure sandbox container image snapshot.
image, err := c.ensureImageExists(ctx, c.config.SandboxImage) image, err := c.ensureImageExists(ctx, c.config.SandboxImage)
if err != nil { if err != nil {
return nil, fmt.Errorf("failed to get sandbox image %q: %v", c.config.SandboxImage, err) return nil, errors.Wrapf(err, "failed to get sandbox image %q", c.config.SandboxImage)
} }
securityContext := config.GetLinux().GetSecurityContext() securityContext := config.GetLinux().GetSecurityContext()
//Create Network Namespace if it is not in host network //Create Network Namespace if it is not in host network
@ -96,7 +98,7 @@ func (c *criContainerdService) RunPodSandbox(ctx context.Context, r *runtime.Run
// be used. // be used.
sandbox.NetNS, err = sandboxstore.NewNetNS() sandbox.NetNS, err = sandboxstore.NewNetNS()
if err != nil { if err != nil {
return nil, fmt.Errorf("failed to create network namespace for sandbox %q: %v", id, err) return nil, errors.Wrapf(err, "failed to create network namespace for sandbox %q", id)
} }
sandbox.NetNSPath = sandbox.NetNS.GetPath() sandbox.NetNSPath = sandbox.NetNS.GetPath()
defer func() { defer func() {
@ -107,53 +109,38 @@ func (c *criContainerdService) RunPodSandbox(ctx context.Context, r *runtime.Run
sandbox.NetNSPath = "" sandbox.NetNSPath = ""
} }
}() }()
if !c.config.EnableIPv6DAD {
// It's a known issue that IPv6 DAD increases sandbox start latency by several seconds.
// Disable it when it's not enabled to avoid the latency.
// See:
// * https://github.com/kubernetes/kubernetes/issues/54651
// * https://www.agwa.name/blog/post/beware_the_ipv6_dad_race_condition
if err := disableNetNSDAD(sandbox.NetNSPath); err != nil {
return nil, fmt.Errorf("failed to disable DAD for sandbox %q: %v", id, err)
}
}
// Setup network for sandbox. // Setup network for sandbox.
podNetwork := ocicni.PodNetwork{ // Certain VM based solutions like clear containers (Issue containerd/cri-containerd#524)
Name: config.GetMetadata().GetName(), // rely on the assumption that CRI shim will not be querying the network namespace to check the
Namespace: config.GetMetadata().GetNamespace(), // network states such as IP.
ID: id, // In future runtime implementation should avoid relying on CRI shim implementation details.
NetNS: sandbox.NetNSPath, // In this case however caching the IP will add a subtle performance enhancement by avoiding
PortMappings: toCNIPortMappings(config.GetPortMappings()), // calls to network namespace of the pod to query the IP of the veth interface on every
} // SandboxStatus request.
if _, err = c.netPlugin.SetUpPod(podNetwork); err != nil { sandbox.IP, err = c.setupPod(id, sandbox.NetNSPath, config)
return nil, fmt.Errorf("failed to setup network for sandbox %q: %v", id, err) if err != nil {
return nil, errors.Wrapf(err, "failed to setup network for sandbox %q", id)
} }
defer func() { defer func() {
if retErr != nil { if retErr != nil {
// Teardown network if an error is returned. // Teardown network if an error is returned.
if err := c.netPlugin.TearDownPod(podNetwork); err != nil { if err := c.teardownPod(id, sandbox.NetNSPath, config); err != nil {
logrus.WithError(err).Errorf("Failed to destroy network for sandbox %q", id) logrus.WithError(err).Errorf("Failed to destroy network for sandbox %q", id)
} }
} }
}() }()
ip, err := c.netPlugin.GetPodNetworkStatus(podNetwork) }
ociRuntime, err := c.getSandboxRuntime(config)
if err != nil { if err != nil {
return nil, fmt.Errorf("failed to get network status for sandbox %q: %v", id, err) return nil, errors.Wrap(err, "failed to get sandbox runtime")
}
// Certain VM based solutions like clear containers (Issue containerd/cri#524)
// rely on the assumption that CRI shim will not be querying the network namespace to check the
// network states such as IP.
// In furture runtime implementation should avoid relying on CRI shim implementation details.
// In this case however caching the IP will add a subtle performance enhancement by avoiding
// calls to network namespace of the pod to query the IP of the veth interface on every
// SandboxStatus request.
sandbox.IP = ip
} }
logrus.Debugf("Use OCI %+v for sandbox %q", ociRuntime, id)
// Create sandbox container. // Create sandbox container.
spec, err := c.generateSandboxContainerSpec(id, config, &image.ImageSpec.Config, sandbox.NetNSPath) spec, err := c.generateSandboxContainerSpec(id, config, &image.ImageSpec.Config, sandbox.NetNSPath)
if err != nil { if err != nil {
return nil, fmt.Errorf("failed to generate sandbox container spec: %v", err) return nil, errors.Wrap(err, "failed to generate sandbox container spec")
} }
logrus.Debugf("Sandbox container spec: %+v", spec) logrus.Debugf("Sandbox container spec: %+v", spec)
@ -167,7 +154,7 @@ func (c *criContainerdService) RunPodSandbox(ctx context.Context, r *runtime.Run
securityContext.GetPrivileged(), securityContext.GetPrivileged(),
c.seccompEnabled) c.seccompEnabled)
if err != nil { if err != nil {
return nil, fmt.Errorf("failed to generate seccomp spec opts: %v", err) return nil, errors.Wrap(err, "failed to generate seccomp spec opts")
} }
if seccompSpecOpts != nil { if seccompSpecOpts != nil {
specOpts = append(specOpts, seccompSpecOpts) specOpts = append(specOpts, seccompSpecOpts)
@ -182,15 +169,15 @@ func (c *criContainerdService) RunPodSandbox(ctx context.Context, r *runtime.Run
containerd.WithContainerLabels(sandboxLabels), containerd.WithContainerLabels(sandboxLabels),
containerd.WithContainerExtension(sandboxMetadataExtension, &sandbox.Metadata), containerd.WithContainerExtension(sandboxMetadataExtension, &sandbox.Metadata),
containerd.WithRuntime( containerd.WithRuntime(
c.config.ContainerdConfig.Runtime, ociRuntime.Type,
&runctypes.RuncOptions{ &runctypes.RuncOptions{
Runtime: c.config.ContainerdConfig.RuntimeEngine, Runtime: ociRuntime.Engine,
RuntimeRoot: c.config.ContainerdConfig.RuntimeRoot, RuntimeRoot: ociRuntime.Root,
SystemdCgroup: c.config.SystemdCgroup})} // TODO (mikebrow): add CriuPath when we add support for pause SystemdCgroup: c.config.SystemdCgroup})} // TODO (mikebrow): add CriuPath when we add support for pause
container, err := c.client.NewContainer(ctx, id, opts...) container, err := c.client.NewContainer(ctx, id, opts...)
if err != nil { if err != nil {
return nil, fmt.Errorf("failed to create containerd container: %v", err) return nil, errors.Wrap(err, "failed to create containerd container")
} }
defer func() { defer func() {
if retErr != nil { if retErr != nil {
@ -202,11 +189,11 @@ func (c *criContainerdService) RunPodSandbox(ctx context.Context, r *runtime.Run
} }
}() }()
// Create sandbox container root directory. // Create sandbox container root directories.
sandboxRootDir := getSandboxRootDir(c.config.RootDir, id) sandboxRootDir := c.getSandboxRootDir(id)
if err := c.os.MkdirAll(sandboxRootDir, 0755); err != nil { if err := c.os.MkdirAll(sandboxRootDir, 0755); err != nil {
return nil, fmt.Errorf("failed to create sandbox root directory %q: %v", return nil, errors.Wrapf(err, "failed to create sandbox root directory %q",
sandboxRootDir, err) sandboxRootDir)
} }
defer func() { defer func() {
if retErr != nil { if retErr != nil {
@ -217,14 +204,28 @@ func (c *criContainerdService) RunPodSandbox(ctx context.Context, r *runtime.Run
} }
} }
}() }()
volatileSandboxRootDir := c.getVolatileSandboxRootDir(id)
// Setup sandbox /dev/shm, /etc/hosts and /etc/resolv.conf. if err := c.os.MkdirAll(volatileSandboxRootDir, 0755); err != nil {
if err = c.setupSandboxFiles(sandboxRootDir, config); err != nil { return nil, errors.Wrapf(err, "failed to create volatile sandbox root directory %q",
return nil, fmt.Errorf("failed to setup sandbox files: %v", err) volatileSandboxRootDir)
} }
defer func() { defer func() {
if retErr != nil { if retErr != nil {
if err = c.unmountSandboxFiles(sandboxRootDir, config); err != nil { // Cleanup the volatile sandbox root directory.
if err := c.os.RemoveAll(volatileSandboxRootDir); err != nil {
logrus.WithError(err).Errorf("Failed to remove volatile sandbox root directory %q",
volatileSandboxRootDir)
}
}
}()
// Setup sandbox /dev/shm, /etc/hosts and /etc/resolv.conf.
if err = c.setupSandboxFiles(id, config); err != nil {
return nil, errors.Wrapf(err, "failed to setup sandbox files")
}
defer func() {
if retErr != nil {
if err = c.unmountSandboxFiles(id, config); err != nil {
logrus.WithError(err).Errorf("Failed to unmount sandbox files in %q", logrus.WithError(err).Errorf("Failed to unmount sandbox files in %q",
sandboxRootDir) sandboxRootDir)
} }
@ -234,19 +235,19 @@ func (c *criContainerdService) RunPodSandbox(ctx context.Context, r *runtime.Run
// Update sandbox created timestamp. // Update sandbox created timestamp.
info, err := container.Info(ctx) info, err := container.Info(ctx)
if err != nil { if err != nil {
return nil, fmt.Errorf("failed to get sandbox container info: %v", err) return nil, errors.Wrap(err, "failed to get sandbox container info")
} }
if err := sandbox.Status.Update(func(status sandboxstore.Status) (sandboxstore.Status, error) { if err := sandbox.Status.Update(func(status sandboxstore.Status) (sandboxstore.Status, error) {
status.CreatedAt = info.CreatedAt status.CreatedAt = info.CreatedAt
return status, nil return status, nil
}); err != nil { }); err != nil {
return nil, fmt.Errorf("failed to update sandbox created timestamp: %v", err) return nil, errors.Wrap(err, "failed to update sandbox created timestamp")
} }
// Add sandbox into sandbox store in UNKNOWN state. // Add sandbox into sandbox store in UNKNOWN state.
sandbox.Container = container sandbox.Container = container
if err := c.sandboxStore.Add(sandbox); err != nil { if err := c.sandboxStore.Add(sandbox); err != nil {
return nil, fmt.Errorf("failed to add sandbox %+v into store: %v", sandbox, err) return nil, errors.Wrapf(err, "failed to add sandbox %+v into store", sandbox)
} }
defer func() { defer func() {
// Delete sandbox from sandbox store if there is an error. // Delete sandbox from sandbox store if there is an error.
@ -287,7 +288,7 @@ func (c *criContainerdService) RunPodSandbox(ctx context.Context, r *runtime.Run
// We don't need stdio for sandbox container. // We don't need stdio for sandbox container.
task, err := container.NewTask(ctx, containerdio.NullIO) task, err := container.NewTask(ctx, containerdio.NullIO)
if err != nil { if err != nil {
return status, fmt.Errorf("failed to create containerd task: %v", err) return status, errors.Wrap(err, "failed to create containerd task")
} }
defer func() { defer func() {
if retErr != nil { if retErr != nil {
@ -302,8 +303,7 @@ func (c *criContainerdService) RunPodSandbox(ctx context.Context, r *runtime.Run
}() }()
if err := task.Start(ctx); err != nil { if err := task.Start(ctx); err != nil {
return status, fmt.Errorf("failed to start sandbox container task %q: %v", return status, errors.Wrapf(err, "failed to start sandbox container task %q", id)
id, err)
} }
// Set the pod sandbox as ready after successfully start sandbox container. // Set the pod sandbox as ready after successfully start sandbox container.
@ -311,13 +311,13 @@ func (c *criContainerdService) RunPodSandbox(ctx context.Context, r *runtime.Run
status.State = sandboxstore.StateReady status.State = sandboxstore.StateReady
return status, nil return status, nil
}); err != nil { }); err != nil {
return nil, fmt.Errorf("failed to start sandbox container: %v", err) return nil, errors.Wrap(err, "failed to start sandbox container")
} }
return &runtime.RunPodSandboxResponse{PodSandboxId: id}, nil return &runtime.RunPodSandboxResponse{PodSandboxId: id}, nil
} }
func (c *criContainerdService) generateSandboxContainerSpec(id string, config *runtime.PodSandboxConfig, func (c *criService) generateSandboxContainerSpec(id string, config *runtime.PodSandboxConfig,
imageConfig *imagespec.ImageConfig, nsPath string) (*runtimespec.Spec, error) { imageConfig *imagespec.ImageConfig, nsPath string) (*runtimespec.Spec, error) {
// Creates a spec Generator with the default spec. // Creates a spec Generator with the default spec.
// TODO(random-liu): [P1] Compare the default settings with docker and containerd default. // TODO(random-liu): [P1] Compare the default settings with docker and containerd default.
@ -338,7 +338,7 @@ func (c *criContainerdService) generateSandboxContainerSpec(id string, config *r
if len(imageConfig.Entrypoint) == 0 { if len(imageConfig.Entrypoint) == 0 {
// Pause image must have entrypoint. // Pause image must have entrypoint.
return nil, fmt.Errorf("invalid empty entrypoint in image config %+v", imageConfig) return nil, errors.Errorf("invalid empty entrypoint in image config %+v", imageConfig)
} }
// Set process commands. // Set process commands.
g.SetProcessArgs(append(imageConfig.Entrypoint, imageConfig.Cmd...)) g.SetProcessArgs(append(imageConfig.Entrypoint, imageConfig.Cmd...))
@ -383,7 +383,7 @@ func (c *criContainerdService) generateSandboxContainerSpec(id string, config *r
selinuxOpt := securityContext.GetSelinuxOptions() selinuxOpt := securityContext.GetSelinuxOptions()
processLabel, mountLabel, err := initSelinuxOpts(selinuxOpt) processLabel, mountLabel, err := initSelinuxOpts(selinuxOpt)
if err != nil { if err != nil {
return nil, fmt.Errorf("failed to init selinux options %+v: %v", securityContext.GetSelinuxOptions(), err) return nil, errors.Wrapf(err, "failed to init selinux options %+v", securityContext.GetSelinuxOptions())
} }
g.SetProcessSelinuxLabel(processLabel) g.SetProcessSelinuxLabel(processLabel)
g.SetLinuxMountLabel(mountLabel) g.SetLinuxMountLabel(mountLabel)
@ -412,11 +412,11 @@ func (c *criContainerdService) generateSandboxContainerSpec(id string, config *r
// setupSandboxFiles sets up necessary sandbox files including /dev/shm, /etc/hosts // setupSandboxFiles sets up necessary sandbox files including /dev/shm, /etc/hosts
// and /etc/resolv.conf. // and /etc/resolv.conf.
func (c *criContainerdService) setupSandboxFiles(rootDir string, config *runtime.PodSandboxConfig) error { func (c *criService) setupSandboxFiles(id string, config *runtime.PodSandboxConfig) error {
// TODO(random-liu): Consider whether we should maintain /etc/hosts and /etc/resolv.conf in kubelet. // TODO(random-liu): Consider whether we should maintain /etc/hosts and /etc/resolv.conf in kubelet.
sandboxEtcHosts := getSandboxHosts(rootDir) sandboxEtcHosts := c.getSandboxHosts(id)
if err := c.os.CopyFile(etcHosts, sandboxEtcHosts, 0644); err != nil { if err := c.os.CopyFile(etcHosts, sandboxEtcHosts, 0644); err != nil {
return fmt.Errorf("failed to generate sandbox hosts file %q: %v", sandboxEtcHosts, err) return errors.Wrapf(err, "failed to generate sandbox hosts file %q", sandboxEtcHosts)
} }
// Set DNS options. Maintain a resolv.conf for the sandbox. // Set DNS options. Maintain a resolv.conf for the sandbox.
@ -425,36 +425,36 @@ func (c *criContainerdService) setupSandboxFiles(rootDir string, config *runtime
if dnsConfig := config.GetDnsConfig(); dnsConfig != nil { if dnsConfig := config.GetDnsConfig(); dnsConfig != nil {
resolvContent, err = parseDNSOptions(dnsConfig.Servers, dnsConfig.Searches, dnsConfig.Options) resolvContent, err = parseDNSOptions(dnsConfig.Servers, dnsConfig.Searches, dnsConfig.Options)
if err != nil { if err != nil {
return fmt.Errorf("failed to parse sandbox DNSConfig %+v: %v", dnsConfig, err) return errors.Wrapf(err, "failed to parse sandbox DNSConfig %+v", dnsConfig)
} }
} }
resolvPath := getResolvPath(rootDir) resolvPath := c.getResolvPath(id)
if resolvContent == "" { if resolvContent == "" {
// copy host's resolv.conf to resolvPath // copy host's resolv.conf to resolvPath
err = c.os.CopyFile(resolvConfPath, resolvPath, 0644) err = c.os.CopyFile(resolvConfPath, resolvPath, 0644)
if err != nil { if err != nil {
return fmt.Errorf("failed to copy host's resolv.conf to %q: %v", resolvPath, err) return errors.Wrapf(err, "failed to copy host's resolv.conf to %q", resolvPath)
} }
} else { } else {
err = c.os.WriteFile(resolvPath, []byte(resolvContent), 0644) err = c.os.WriteFile(resolvPath, []byte(resolvContent), 0644)
if err != nil { if err != nil {
return fmt.Errorf("failed to write resolv content to %q: %v", resolvPath, err) return errors.Wrapf(err, "failed to write resolv content to %q", resolvPath)
} }
} }
// Setup sandbox /dev/shm. // Setup sandbox /dev/shm.
if config.GetLinux().GetSecurityContext().GetNamespaceOptions().GetIpc() == runtime.NamespaceMode_NODE { if config.GetLinux().GetSecurityContext().GetNamespaceOptions().GetIpc() == runtime.NamespaceMode_NODE {
if _, err := c.os.Stat(devShm); err != nil { if _, err := c.os.Stat(devShm); err != nil {
return fmt.Errorf("host %q is not available for host ipc: %v", devShm, err) return errors.Wrapf(err, "host %q is not available for host ipc", devShm)
} }
} else { } else {
sandboxDevShm := getSandboxDevShm(rootDir) sandboxDevShm := c.getSandboxDevShm(id)
if err := c.os.MkdirAll(sandboxDevShm, 0700); err != nil { if err := c.os.MkdirAll(sandboxDevShm, 0700); err != nil {
return fmt.Errorf("failed to create sandbox shm: %v", err) return errors.Wrap(err, "failed to create sandbox shm")
} }
shmproperty := fmt.Sprintf("mode=1777,size=%d", defaultShmSize) shmproperty := fmt.Sprintf("mode=1777,size=%d", defaultShmSize)
if err := c.os.Mount("shm", sandboxDevShm, "tmpfs", uintptr(unix.MS_NOEXEC|unix.MS_NOSUID|unix.MS_NODEV), shmproperty); err != nil { if err := c.os.Mount("shm", sandboxDevShm, "tmpfs", uintptr(unix.MS_NOEXEC|unix.MS_NOSUID|unix.MS_NODEV), shmproperty); err != nil {
return fmt.Errorf("failed to mount sandbox shm: %v", err) return errors.Wrap(err, "failed to mount sandbox shm")
} }
} }
@ -467,7 +467,7 @@ func parseDNSOptions(servers, searches, options []string) (string, error) {
resolvContent := "" resolvContent := ""
if len(searches) > maxDNSSearches { if len(searches) > maxDNSSearches {
return "", fmt.Errorf("DNSOption.Searches has more than 6 domains") return "", errors.New("DNSOption.Searches has more than 6 domains")
} }
if len(searches) > 0 { if len(searches) > 0 {
@ -489,23 +489,52 @@ func parseDNSOptions(servers, searches, options []string) (string, error) {
// remove these files. Unmount should *NOT* return error when: // remove these files. Unmount should *NOT* return error when:
// 1) The mount point is already unmounted. // 1) The mount point is already unmounted.
// 2) The mount point doesn't exist. // 2) The mount point doesn't exist.
func (c *criContainerdService) unmountSandboxFiles(rootDir string, config *runtime.PodSandboxConfig) error { func (c *criService) unmountSandboxFiles(id string, config *runtime.PodSandboxConfig) error {
if config.GetLinux().GetSecurityContext().GetNamespaceOptions().GetIpc() != runtime.NamespaceMode_NODE { if config.GetLinux().GetSecurityContext().GetNamespaceOptions().GetIpc() != runtime.NamespaceMode_NODE {
if err := c.os.Unmount(getSandboxDevShm(rootDir), unix.MNT_DETACH); err != nil && !os.IsNotExist(err) { path, err := c.os.FollowSymlinkInScope(c.getSandboxDevShm(id), "/")
return err if err != nil {
return errors.Wrap(err, "failed to follow symlink")
}
if err := c.os.Unmount(path, unix.MNT_DETACH); err != nil && !os.IsNotExist(err) {
return errors.Wrapf(err, "failed to unmount %q", path)
} }
} }
return nil return nil
} }
// setupPod setups up the network for a pod
func (c *criService) setupPod(id string, path string, config *runtime.PodSandboxConfig) (string, error) {
if c.netPlugin == nil {
return "", errors.New("cni config not intialized")
}
labels := getPodCNILabels(id, config)
result, err := c.netPlugin.Setup(id,
path,
cni.WithLabels(labels),
cni.WithCapabilityPortMap(toCNIPortMappings(config.GetPortMappings())))
if err != nil {
return "", err
}
// Check if the default interface has IP config
if configs, ok := result.Interfaces[defaultIfName]; ok && len(configs.IPConfigs) > 0 {
return configs.IPConfigs[0].IP.String(), nil
}
// If it comes here then the result was invalid so destroy the pod network and return error
if err := c.teardownPod(id, path, config); err != nil {
logrus.WithError(err).Errorf("Failed to destroy network for sandbox %q", id)
}
return "", errors.Errorf("failed to find network info for sandbox %q", id)
}
// toCNIPortMappings converts CRI port mappings to CNI. // toCNIPortMappings converts CRI port mappings to CNI.
func toCNIPortMappings(criPortMappings []*runtime.PortMapping) []ocicni.PortMapping { func toCNIPortMappings(criPortMappings []*runtime.PortMapping) []cni.PortMapping {
var portMappings []ocicni.PortMapping var portMappings []cni.PortMapping
for _, mapping := range criPortMappings { for _, mapping := range criPortMappings {
if mapping.HostPort <= 0 { if mapping.HostPort <= 0 {
continue continue
} }
portMappings = append(portMappings, ocicni.PortMapping{ portMappings = append(portMappings, cni.PortMapping{
HostPort: mapping.HostPort, HostPort: mapping.HostPort,
ContainerPort: mapping.ContainerPort, ContainerPort: mapping.ContainerPort,
Protocol: strings.ToLower(mapping.Protocol.String()), Protocol: strings.ToLower(mapping.Protocol.String()),
@ -514,3 +543,48 @@ func toCNIPortMappings(criPortMappings []*runtime.PortMapping) []ocicni.PortMapp
} }
return portMappings return portMappings
} }
// untrustedWorkload returns true if the sandbox contains untrusted workload.
func untrustedWorkload(config *runtime.PodSandboxConfig) bool {
return config.GetAnnotations()[annotations.UntrustedWorkload] == "true"
}
// hostPrivilegedSandbox returns true if the sandbox configuration
// requires additional host privileges for the sandbox.
func hostPrivilegedSandbox(config *runtime.PodSandboxConfig) bool {
securityContext := config.GetLinux().GetSecurityContext()
if securityContext.GetPrivileged() {
return true
}
namespaceOptions := securityContext.GetNamespaceOptions()
if namespaceOptions.GetNetwork() == runtime.NamespaceMode_NODE ||
namespaceOptions.GetPid() == runtime.NamespaceMode_NODE ||
namespaceOptions.GetIpc() == runtime.NamespaceMode_NODE {
return true
}
return false
}
// getSandboxRuntime returns the runtime configuration for sandbox.
// If the sandbox contains untrusted workload, runtime for untrusted workload will be returned,
// or else default runtime will be returned.
func (c *criService) getSandboxRuntime(config *runtime.PodSandboxConfig) (criconfig.Runtime, error) {
untrusted := false
if untrustedWorkload(config) {
// TODO(random-liu): Figure out we should return error or not.
if hostPrivilegedSandbox(config) {
return criconfig.Runtime{}, errors.New("untrusted workload with host privilege is not allowed")
}
untrusted = true
}
if untrusted {
if c.config.ContainerdConfig.UntrustedWorkloadRuntime.Type == "" {
return criconfig.Runtime{}, errors.New("no runtime for untrusted workload is configured")
}
return c.config.ContainerdConfig.UntrustedWorkloadRuntime, nil
}
return c.config.ContainerdConfig.DefaultRuntime, nil
}

View File

@ -18,22 +18,23 @@ package server
import ( import (
"encoding/json" "encoding/json"
"fmt"
"github.com/containerd/containerd" "github.com/containerd/containerd"
"github.com/containerd/containerd/errdefs" "github.com/containerd/containerd/errdefs"
runtimespec "github.com/opencontainers/runtime-spec/specs-go" runtimespec "github.com/opencontainers/runtime-spec/specs-go"
"github.com/pkg/errors"
"golang.org/x/net/context" "golang.org/x/net/context"
runtime "k8s.io/kubernetes/pkg/kubelet/apis/cri/runtime/v1alpha2" runtime "k8s.io/kubernetes/pkg/kubelet/apis/cri/runtime/v1alpha2"
criconfig "github.com/containerd/cri/pkg/config"
sandboxstore "github.com/containerd/cri/pkg/store/sandbox" sandboxstore "github.com/containerd/cri/pkg/store/sandbox"
) )
// PodSandboxStatus returns the status of the PodSandbox. // PodSandboxStatus returns the status of the PodSandbox.
func (c *criContainerdService) PodSandboxStatus(ctx context.Context, r *runtime.PodSandboxStatusRequest) (*runtime.PodSandboxStatusResponse, error) { func (c *criService) PodSandboxStatus(ctx context.Context, r *runtime.PodSandboxStatusRequest) (*runtime.PodSandboxStatusResponse, error) {
sandbox, err := c.sandboxStore.Get(r.GetPodSandboxId()) sandbox, err := c.sandboxStore.Get(r.GetPodSandboxId())
if err != nil { if err != nil {
return nil, fmt.Errorf("an error occurred when try to find sandbox: %v", err) return nil, errors.Wrap(err, "an error occurred when try to find sandbox")
} }
ip := c.getIP(sandbox) ip := c.getIP(sandbox)
@ -45,7 +46,7 @@ func (c *criContainerdService) PodSandboxStatus(ctx context.Context, r *runtime.
// Generate verbose information. // Generate verbose information.
info, err := toCRISandboxInfo(ctx, sandbox) info, err := toCRISandboxInfo(ctx, sandbox)
if err != nil { if err != nil {
return nil, fmt.Errorf("failed to get verbose sandbox container info: %v", err) return nil, errors.Wrap(err, "failed to get verbose sandbox container info")
} }
return &runtime.PodSandboxStatusResponse{ return &runtime.PodSandboxStatusResponse{
@ -54,7 +55,7 @@ func (c *criContainerdService) PodSandboxStatus(ctx context.Context, r *runtime.
}, nil }, nil
} }
func (c *criContainerdService) getIP(sandbox sandboxstore.Sandbox) string { func (c *criService) getIP(sandbox sandboxstore.Sandbox) string {
config := sandbox.Config config := sandbox.Config
if config.GetLinux().GetSecurityContext().GetNamespaceOptions().GetNetwork() == runtime.NamespaceMode_NODE { if config.GetLinux().GetSecurityContext().GetNamespaceOptions().GetNetwork() == runtime.NamespaceMode_NODE {
@ -107,6 +108,7 @@ type sandboxInfo struct {
Image string `json:"image"` Image string `json:"image"`
SnapshotKey string `json:"snapshotKey"` SnapshotKey string `json:"snapshotKey"`
Snapshotter string `json:"snapshotter"` Snapshotter string `json:"snapshotter"`
Runtime *criconfig.Runtime `json:"runtime"`
Config *runtime.PodSandboxConfig `json:"config"` Config *runtime.PodSandboxConfig `json:"config"`
RuntimeSpec *runtimespec.Spec `json:"runtimeSpec"` RuntimeSpec *runtimespec.Spec `json:"runtimeSpec"`
} }
@ -116,14 +118,14 @@ func toCRISandboxInfo(ctx context.Context, sandbox sandboxstore.Sandbox) (map[st
container := sandbox.Container container := sandbox.Container
task, err := container.Task(ctx, nil) task, err := container.Task(ctx, nil)
if err != nil && !errdefs.IsNotFound(err) { if err != nil && !errdefs.IsNotFound(err) {
return nil, fmt.Errorf("failed to get sandbox container task: %v", err) return nil, errors.Wrap(err, "failed to get sandbox container task")
} }
var processStatus containerd.ProcessStatus var processStatus containerd.ProcessStatus
if task != nil { if task != nil {
taskStatus, err := task.Status(ctx) taskStatus, err := task.Status(ctx)
if err != nil { if err != nil {
return nil, fmt.Errorf("failed to get task status: %v", err) return nil, errors.Wrap(err, "failed to get task status")
} }
processStatus = taskStatus.Status processStatus = taskStatus.Status
@ -148,13 +150,13 @@ func toCRISandboxInfo(ctx context.Context, sandbox sandboxstore.Sandbox) (map[st
spec, err := container.Spec(ctx) spec, err := container.Spec(ctx)
if err != nil { if err != nil {
return nil, fmt.Errorf("failed to get sandbox container runtime spec: %v", err) return nil, errors.Wrap(err, "failed to get sandbox container runtime spec")
} }
si.RuntimeSpec = spec si.RuntimeSpec = spec
ctrInfo, err := container.Info(ctx) ctrInfo, err := container.Info(ctx)
if err != nil { if err != nil {
return nil, fmt.Errorf("failed to get sandbox container info: %v", err) return nil, errors.Wrap(err, "failed to get sandbox container info")
} }
// Do not use config.SandboxImage because the configuration might // Do not use config.SandboxImage because the configuration might
// be changed during restart. It may not reflect the actual image // be changed during restart. It may not reflect the actual image
@ -163,9 +165,15 @@ func toCRISandboxInfo(ctx context.Context, sandbox sandboxstore.Sandbox) (map[st
si.SnapshotKey = ctrInfo.SnapshotKey si.SnapshotKey = ctrInfo.SnapshotKey
si.Snapshotter = ctrInfo.Snapshotter si.Snapshotter = ctrInfo.Snapshotter
ociRuntime, err := getRuntimeConfigFromContainerInfo(ctrInfo)
if err != nil {
return nil, errors.Wrap(err, "failed to get sandbox container runtime config")
}
si.Runtime = &ociRuntime
infoBytes, err := json.Marshal(si) infoBytes, err := json.Marshal(si)
if err != nil { if err != nil {
return nil, fmt.Errorf("failed to marshal info %v: %v", si, err) return nil, errors.Wrapf(err, "failed to marshal info %v", si)
} }
return map[string]string{ return map[string]string{
"info": string(infoBytes), "info": string(infoBytes),

View File

@ -17,13 +17,13 @@ limitations under the License.
package server package server
import ( import (
"fmt"
"os" "os"
"time" "time"
"github.com/containerd/containerd" "github.com/containerd/containerd"
"github.com/containerd/containerd/errdefs" "github.com/containerd/containerd/errdefs"
"github.com/cri-o/ocicni/pkg/ocicni" cni "github.com/containerd/go-cni"
"github.com/pkg/errors"
"github.com/sirupsen/logrus" "github.com/sirupsen/logrus"
"golang.org/x/net/context" "golang.org/x/net/context"
runtime "k8s.io/kubernetes/pkg/kubelet/apis/cri/runtime/v1alpha2" runtime "k8s.io/kubernetes/pkg/kubelet/apis/cri/runtime/v1alpha2"
@ -33,11 +33,11 @@ import (
// StopPodSandbox stops the sandbox. If there are any running containers in the // StopPodSandbox stops the sandbox. If there are any running containers in the
// sandbox, they should be forcibly terminated. // sandbox, they should be forcibly terminated.
func (c *criContainerdService) StopPodSandbox(ctx context.Context, r *runtime.StopPodSandboxRequest) (*runtime.StopPodSandboxResponse, error) { func (c *criService) StopPodSandbox(ctx context.Context, r *runtime.StopPodSandboxRequest) (*runtime.StopPodSandboxResponse, error) {
sandbox, err := c.sandboxStore.Get(r.GetPodSandboxId()) sandbox, err := c.sandboxStore.Get(r.GetPodSandboxId())
if err != nil { if err != nil {
return nil, fmt.Errorf("an error occurred when try to find sandbox %q: %v", return nil, errors.Wrapf(err, "an error occurred when try to find sandbox %q",
r.GetPodSandboxId(), err) r.GetPodSandboxId())
} }
// Use the full sandbox id. // Use the full sandbox id.
id := sandbox.ID id := sandbox.ID
@ -54,7 +54,7 @@ func (c *criContainerdService) StopPodSandbox(ctx context.Context, r *runtime.St
// Forcibly stop the container. Do not use `StopContainer`, because it introduces a race // Forcibly stop the container. Do not use `StopContainer`, because it introduces a race
// if a container is removed after list. // if a container is removed after list.
if err = c.stopContainer(ctx, container, 0); err != nil { if err = c.stopContainer(ctx, container, 0); err != nil {
return nil, fmt.Errorf("failed to stop container %q: %v", container.ID, err) return nil, errors.Wrapf(err, "failed to stop container %q", container.ID)
} }
} }
@ -62,17 +62,11 @@ func (c *criContainerdService) StopPodSandbox(ctx context.Context, r *runtime.St
if sandbox.NetNSPath != "" && sandbox.NetNS != nil { if sandbox.NetNSPath != "" && sandbox.NetNS != nil {
if _, err := os.Stat(sandbox.NetNSPath); err != nil { if _, err := os.Stat(sandbox.NetNSPath); err != nil {
if !os.IsNotExist(err) { if !os.IsNotExist(err) {
return nil, fmt.Errorf("failed to stat network namespace path %s :%v", sandbox.NetNSPath, err) return nil, errors.Wrapf(err, "failed to stat network namespace path %s", sandbox.NetNSPath)
} }
} else { } else {
if teardownErr := c.netPlugin.TearDownPod(ocicni.PodNetwork{ if teardownErr := c.teardownPod(id, sandbox.NetNSPath, sandbox.Config); teardownErr != nil {
Name: sandbox.Config.GetMetadata().GetName(), return nil, errors.Wrapf(teardownErr, "failed to destroy network for sandbox %q", id)
Namespace: sandbox.Config.GetMetadata().GetNamespace(),
ID: id,
NetNS: sandbox.NetNSPath,
PortMappings: toCNIPortMappings(sandbox.Config.GetPortMappings()),
}); teardownErr != nil {
return nil, fmt.Errorf("failed to destroy network for sandbox %q: %v", id, teardownErr)
} }
} }
/*TODO:It is still possible that containerd crashes after we teardown the network, but before we remove the network namespace. /*TODO:It is still possible that containerd crashes after we teardown the network, but before we remove the network namespace.
@ -81,56 +75,68 @@ func (c *criContainerdService) StopPodSandbox(ctx context.Context, r *runtime.St
//Close the sandbox network namespace if it was created //Close the sandbox network namespace if it was created
if err = sandbox.NetNS.Remove(); err != nil { if err = sandbox.NetNS.Remove(); err != nil {
return nil, fmt.Errorf("failed to remove network namespace for sandbox %q: %v", id, err) return nil, errors.Wrapf(err, "failed to remove network namespace for sandbox %q", id)
} }
} }
logrus.Infof("TearDown network for sandbox %q successfully", id) logrus.Infof("TearDown network for sandbox %q successfully", id)
sandboxRoot := getSandboxRootDir(c.config.RootDir, id) if err := c.unmountSandboxFiles(id, sandbox.Config); err != nil {
if err := c.unmountSandboxFiles(sandboxRoot, sandbox.Config); err != nil { return nil, errors.Wrap(err, "failed to unmount sandbox files")
return nil, fmt.Errorf("failed to unmount sandbox files in %q: %v", sandboxRoot, err)
} }
// Only stop sandbox container when it's running. // Only stop sandbox container when it's running.
if sandbox.Status.Get().State == sandboxstore.StateReady { if sandbox.Status.Get().State == sandboxstore.StateReady {
if err := c.stopSandboxContainer(ctx, sandbox); err != nil { if err := c.stopSandboxContainer(ctx, sandbox); err != nil {
return nil, fmt.Errorf("failed to stop sandbox container %q: %v", id, err) return nil, errors.Wrapf(err, "failed to stop sandbox container %q", id)
} }
} }
return &runtime.StopPodSandboxResponse{}, nil return &runtime.StopPodSandboxResponse{}, nil
} }
// stopSandboxContainer kills and deletes sandbox container. // stopSandboxContainer kills and deletes sandbox container.
func (c *criContainerdService) stopSandboxContainer(ctx context.Context, sandbox sandboxstore.Sandbox) error { func (c *criService) stopSandboxContainer(ctx context.Context, sandbox sandboxstore.Sandbox) error {
container := sandbox.Container container := sandbox.Container
task, err := container.Task(ctx, nil) task, err := container.Task(ctx, nil)
if err != nil { if err != nil {
if errdefs.IsNotFound(err) { if errdefs.IsNotFound(err) {
return nil return nil
} }
return fmt.Errorf("failed to get sandbox container: %v", err) return errors.Wrap(err, "failed to get sandbox container")
} }
// Delete the sandbox container from containerd. // Delete the sandbox container from containerd.
_, err = task.Delete(ctx, containerd.WithProcessKill) _, err = task.Delete(ctx, containerd.WithProcessKill)
if err != nil && !errdefs.IsNotFound(err) { if err != nil && !errdefs.IsNotFound(err) {
return fmt.Errorf("failed to delete sandbox container: %v", err) return errors.Wrap(err, "failed to delete sandbox container")
} }
return c.waitSandboxStop(ctx, sandbox, killContainerTimeout) return c.waitSandboxStop(ctx, sandbox, killContainerTimeout)
} }
// waitSandboxStop waits for sandbox to be stopped until timeout exceeds or context is cancelled. // waitSandboxStop waits for sandbox to be stopped until timeout exceeds or context is cancelled.
func (c *criContainerdService) waitSandboxStop(ctx context.Context, sandbox sandboxstore.Sandbox, timeout time.Duration) error { func (c *criService) waitSandboxStop(ctx context.Context, sandbox sandboxstore.Sandbox, timeout time.Duration) error {
timeoutTimer := time.NewTimer(timeout) timeoutTimer := time.NewTimer(timeout)
defer timeoutTimer.Stop() defer timeoutTimer.Stop()
select { select {
case <-ctx.Done(): case <-ctx.Done():
return fmt.Errorf("wait sandbox container %q is cancelled", sandbox.ID) return errors.Errorf("wait sandbox container %q is cancelled", sandbox.ID)
case <-timeoutTimer.C: case <-timeoutTimer.C:
return fmt.Errorf("wait sandbox container %q stop timeout", sandbox.ID) return errors.Errorf("wait sandbox container %q stop timeout", sandbox.ID)
case <-sandbox.Stopped(): case <-sandbox.Stopped():
return nil return nil
} }
} }
// teardownPod removes the network from the pod
func (c *criService) teardownPod(id string, path string, config *runtime.PodSandboxConfig) error {
if c.netPlugin == nil {
return errors.New("cni config not intialized")
}
labels := getPodCNILabels(id, config)
return c.netPlugin.Remove(id,
path,
cni.WithLabels(labels),
cni.WithCapabilityPortMap(toCNIPortMappings(config.GetPortMappings())))
}

View File

@ -24,10 +24,11 @@ import (
"github.com/containerd/containerd" "github.com/containerd/containerd"
"github.com/containerd/containerd/plugin" "github.com/containerd/containerd/plugin"
"github.com/cri-o/ocicni/pkg/ocicni" cni "github.com/containerd/go-cni"
runcapparmor "github.com/opencontainers/runc/libcontainer/apparmor" runcapparmor "github.com/opencontainers/runc/libcontainer/apparmor"
runcseccomp "github.com/opencontainers/runc/libcontainer/seccomp" runcseccomp "github.com/opencontainers/runc/libcontainer/seccomp"
"github.com/opencontainers/selinux/go-selinux" "github.com/opencontainers/selinux/go-selinux"
"github.com/pkg/errors"
"github.com/sirupsen/logrus" "github.com/sirupsen/logrus"
"google.golang.org/grpc" "google.golang.org/grpc"
runtime "k8s.io/kubernetes/pkg/kubelet/apis/cri/runtime/v1alpha2" runtime "k8s.io/kubernetes/pkg/kubelet/apis/cri/runtime/v1alpha2"
@ -49,11 +50,11 @@ import (
type grpcServices interface { type grpcServices interface {
runtime.RuntimeServiceServer runtime.RuntimeServiceServer
runtime.ImageServiceServer runtime.ImageServiceServer
api.CRIContainerdServiceServer api.CRIPluginServiceServer
} }
// CRIContainerdService is the interface implement CRI remote service server. // CRIService is the interface implement CRI remote service server.
type CRIContainerdService interface { type CRIService interface {
Run() error Run() error
// io.Closer is used by containerd to gracefully stop cri service. // io.Closer is used by containerd to gracefully stop cri service.
io.Closer io.Closer
@ -61,8 +62,8 @@ type CRIContainerdService interface {
grpcServices grpcServices
} }
// criContainerdService implements CRIContainerdService. // criService implements CRIService.
type criContainerdService struct { type criService struct {
// config contains all configurations. // config contains all configurations.
config criconfig.Config config criconfig.Config
// imageFSPath is the path to image filesystem. // imageFSPath is the path to image filesystem.
@ -88,7 +89,7 @@ type criContainerdService struct {
// snapshotStore stores information of all snapshots. // snapshotStore stores information of all snapshots.
snapshotStore *snapshotstore.Store snapshotStore *snapshotstore.Store
// netPlugin is used to setup and teardown network when run/stop pod sandbox. // netPlugin is used to setup and teardown network when run/stop pod sandbox.
netPlugin ocicni.CNIPlugin netPlugin cni.CNI
// client is an instance of the containerd client // client is an instance of the containerd client
client *containerd.Client client *containerd.Client
// streamServer is the streaming server serves container streaming request. // streamServer is the streaming server serves container streaming request.
@ -100,10 +101,10 @@ type criContainerdService struct {
initialized atomic.Bool initialized atomic.Bool
} }
// NewCRIContainerdService returns a new instance of CRIContainerdService // NewCRIService returns a new instance of CRIService
func NewCRIContainerdService(config criconfig.Config, client *containerd.Client) (CRIContainerdService, error) { func NewCRIService(config criconfig.Config, client *containerd.Client) (CRIService, error) {
var err error var err error
c := &criContainerdService{ c := &criService{
config: config, config: config,
client: client, client: client,
apparmorEnabled: runcapparmor.IsEnabled(), apparmorEnabled: runcapparmor.IsEnabled(),
@ -129,15 +130,26 @@ func NewCRIContainerdService(config criconfig.Config, client *containerd.Client)
c.imageFSPath = imageFSPath(config.ContainerdRootDir, config.ContainerdConfig.Snapshotter) c.imageFSPath = imageFSPath(config.ContainerdRootDir, config.ContainerdConfig.Snapshotter)
logrus.Infof("Get image filesystem path %q", c.imageFSPath) logrus.Infof("Get image filesystem path %q", c.imageFSPath)
c.netPlugin, err = ocicni.InitCNI(config.NetworkPluginConfDir, config.NetworkPluginBinDir) // Pod needs to attach to atleast loopback network and a non host network,
// hence networkAttachCount is 2. If there are more network configs the
// pod will be attached to all the networks but we will only use the ip
// of the default network interface as the pod IP.
c.netPlugin, err = cni.New(cni.WithMinNetworkCount(networkAttachCount),
cni.WithPluginConfDir(config.NetworkPluginConfDir),
cni.WithPluginDir([]string{config.NetworkPluginBinDir}))
if err != nil { if err != nil {
return nil, fmt.Errorf("failed to initialize cni plugin: %v", err) return nil, errors.Wrap(err, "failed to initialize cni")
} }
// Try to load the config if it exists. Just log the error if load fails
// This is not disruptive for containerd to panic
if err := c.netPlugin.Load(cni.WithLoNetwork(), cni.WithDefaultConf()); err != nil {
logrus.WithError(err).Error("Failed to load cni during init, please check CRI plugin status before setting up network for pods")
}
// prepare streaming server // prepare streaming server
c.streamServer, err = newStreamServer(c, config.StreamServerAddress, config.StreamServerPort) c.streamServer, err = newStreamServer(c, config.StreamServerAddress, config.StreamServerPort)
if err != nil { if err != nil {
return nil, fmt.Errorf("failed to create stream server: %v", err) return nil, errors.Wrap(err, "failed to create stream server")
} }
c.eventMonitor = newEventMonitor(c.containerStore, c.sandboxStore) c.eventMonitor = newEventMonitor(c.containerStore, c.sandboxStore)
@ -147,29 +159,29 @@ func NewCRIContainerdService(config criconfig.Config, client *containerd.Client)
// Register registers all required services onto a specific grpc server. // Register registers all required services onto a specific grpc server.
// This is used by containerd cri plugin. // This is used by containerd cri plugin.
func (c *criContainerdService) Register(s *grpc.Server) error { func (c *criService) Register(s *grpc.Server) error {
instrumented := newInstrumentedService(c) instrumented := newInstrumentedService(c)
runtime.RegisterRuntimeServiceServer(s, instrumented) runtime.RegisterRuntimeServiceServer(s, instrumented)
runtime.RegisterImageServiceServer(s, instrumented) runtime.RegisterImageServiceServer(s, instrumented)
api.RegisterCRIContainerdServiceServer(s, instrumented) api.RegisterCRIPluginServiceServer(s, instrumented)
return nil return nil
} }
// Run starts the cri-containerd service. // Run starts the CRI service.
func (c *criContainerdService) Run() error { func (c *criService) Run() error {
logrus.Info("Start subscribing containerd event") logrus.Info("Start subscribing containerd event")
c.eventMonitor.subscribe(c.client) c.eventMonitor.subscribe(c.client)
logrus.Infof("Start recovering state") logrus.Infof("Start recovering state")
if err := c.recover(ctrdutil.NamespacedContext()); err != nil { if err := c.recover(ctrdutil.NamespacedContext()); err != nil {
return fmt.Errorf("failed to recover state: %v", err) return errors.Wrap(err, "failed to recover state")
} }
// Start event handler. // Start event handler.
logrus.Info("Start event monitor") logrus.Info("Start event monitor")
eventMonitorCloseCh, err := c.eventMonitor.start() eventMonitorCloseCh, err := c.eventMonitor.start()
if err != nil { if err != nil {
return fmt.Errorf("failed to start event monitor: %v", err) return errors.Wrap(err, "failed to start event monitor")
} }
// Start snapshot stats syncer, it doesn't need to be stopped. // Start snapshot stats syncer, it doesn't need to be stopped.
@ -194,13 +206,13 @@ func (c *criContainerdService) Run() error {
// Set the server as initialized. GRPC services could start serving traffic. // Set the server as initialized. GRPC services could start serving traffic.
c.initialized.Set() c.initialized.Set()
// Stop the whole cri-containerd service if any of the critical service exits. // Stop the whole CRI service if any of the critical service exits.
select { select {
case <-eventMonitorCloseCh: case <-eventMonitorCloseCh:
case <-streamServerCloseCh: case <-streamServerCloseCh:
} }
if err := c.Close(); err != nil { if err := c.Close(); err != nil {
return fmt.Errorf("failed to stop cri service: %v", err) return errors.Wrap(err, "failed to stop cri service")
} }
<-eventMonitorCloseCh <-eventMonitorCloseCh
@ -223,13 +235,13 @@ func (c *criContainerdService) Run() error {
return nil return nil
} }
// Stop stops the cri-containerd service. // Stop stops the CRI service.
func (c *criContainerdService) Close() error { func (c *criService) Close() error {
logrus.Info("Stop cri-containerd service") logrus.Info("Stop CRI service")
// TODO(random-liu): Make event monitor stop synchronous. // TODO(random-liu): Make event monitor stop synchronous.
c.eventMonitor.stop() c.eventMonitor.stop()
if err := c.streamServer.Stop(); err != nil { if err := c.streamServer.Stop(); err != nil {
return fmt.Errorf("failed to stop stream server: %v", err) return errors.Wrap(err, "failed to stop stream server")
} }
return nil return nil
} }

View File

@ -18,11 +18,11 @@ package server
import ( import (
"context" "context"
"fmt"
"time" "time"
"github.com/containerd/containerd/errdefs" "github.com/containerd/containerd/errdefs"
snapshot "github.com/containerd/containerd/snapshots" snapshot "github.com/containerd/containerd/snapshots"
"github.com/pkg/errors"
"github.com/sirupsen/logrus" "github.com/sirupsen/logrus"
ctrdutil "github.com/containerd/cri/pkg/containerd/util" ctrdutil "github.com/containerd/cri/pkg/containerd/util"
@ -80,7 +80,7 @@ func (s *snapshotsSyncer) sync() error {
snapshots = append(snapshots, info) snapshots = append(snapshots, info)
return nil return nil
}); err != nil { }); err != nil {
return fmt.Errorf("walk all snapshots failed: %v", err) return errors.Wrap(err, "walk all snapshots failed")
} }
for _, info := range snapshots { for _, info := range snapshots {
sn, err := s.store.Get(info.Name) sn, err := s.store.Get(info.Name)

View File

@ -21,6 +21,7 @@ import (
"fmt" "fmt"
goruntime "runtime" goruntime "runtime"
cni "github.com/containerd/go-cni"
"golang.org/x/net/context" "golang.org/x/net/context"
runtime "k8s.io/kubernetes/pkg/kubelet/apis/cri/runtime/v1alpha2" runtime "k8s.io/kubernetes/pkg/kubelet/apis/cri/runtime/v1alpha2"
) )
@ -29,7 +30,7 @@ import (
const networkNotReadyReason = "NetworkPluginNotReady" const networkNotReadyReason = "NetworkPluginNotReady"
// Status returns the status of the runtime. // Status returns the status of the runtime.
func (c *criContainerdService) Status(ctx context.Context, r *runtime.StatusRequest) (*runtime.StatusResponse, error) { func (c *criService) Status(ctx context.Context, r *runtime.StatusRequest) (*runtime.StatusResponse, error) {
// As a containerd plugin, if CRI plugin is serving request, // As a containerd plugin, if CRI plugin is serving request,
// containerd must be ready. // containerd must be ready.
runtimeCondition := &runtime.RuntimeCondition{ runtimeCondition := &runtime.RuntimeCondition{
@ -40,11 +41,15 @@ func (c *criContainerdService) Status(ctx context.Context, r *runtime.StatusRequ
Type: runtime.NetworkReady, Type: runtime.NetworkReady,
Status: true, Status: true,
} }
// Check the status of the cni initialization
if err := c.netPlugin.Status(); err != nil { if err := c.netPlugin.Status(); err != nil {
// If it is not initialized, then load the config and retry
if err = c.netPlugin.Load(cni.WithLoNetwork(), cni.WithDefaultConf()); err != nil {
networkCondition.Status = false networkCondition.Status = false
networkCondition.Reason = networkNotReadyReason networkCondition.Reason = networkNotReadyReason
networkCondition.Message = fmt.Sprintf("Network plugin returns error: %v", err) networkCondition.Message = fmt.Sprintf("Network plugin returns error: %v", err)
} }
}
resp := &runtime.StatusResponse{ resp := &runtime.StatusResponse{
Status: &runtime.RuntimeStatus{Conditions: []*runtime.RuntimeCondition{ Status: &runtime.RuntimeStatus{Conditions: []*runtime.RuntimeCondition{

View File

@ -17,39 +17,65 @@ limitations under the License.
package server package server
import ( import (
"crypto/rand"
"crypto/rsa"
"crypto/tls"
"crypto/x509"
"crypto/x509/pkix"
"encoding/pem"
"fmt" "fmt"
"io" "io"
"math" "math"
"math/big"
"net" "net"
"os"
"time"
"github.com/pkg/errors"
k8snet "k8s.io/apimachinery/pkg/util/net" k8snet "k8s.io/apimachinery/pkg/util/net"
"k8s.io/apimachinery/pkg/util/runtime" "k8s.io/apimachinery/pkg/util/runtime"
"k8s.io/client-go/tools/remotecommand" "k8s.io/client-go/tools/remotecommand"
k8scert "k8s.io/client-go/util/cert"
"k8s.io/kubernetes/pkg/kubelet/server/streaming" "k8s.io/kubernetes/pkg/kubelet/server/streaming"
"k8s.io/utils/exec" "k8s.io/utils/exec"
ctrdutil "github.com/containerd/cri/pkg/containerd/util" ctrdutil "github.com/containerd/cri/pkg/containerd/util"
) )
func newStreamServer(c *criContainerdService, addr, port string) (streaming.Server, error) { const (
// certOrganizationName is the name of this organization, used for certificates etc.
certOrganizationName = "containerd"
// certCommonName is the common name of the CRI plugin
certCommonName = "cri"
)
func newStreamServer(c *criService, addr, port string) (streaming.Server, error) {
if addr == "" { if addr == "" {
a, err := k8snet.ChooseBindAddress(nil) a, err := k8snet.ChooseBindAddress(nil)
if err != nil { if err != nil {
return nil, fmt.Errorf("failed to get stream server address: %v", err) return nil, errors.Wrap(err, "failed to get stream server address")
} }
addr = a.String() addr = a.String()
} }
config := streaming.DefaultConfig config := streaming.DefaultConfig
config.Addr = net.JoinHostPort(addr, port) config.Addr = net.JoinHostPort(addr, port)
runtime := newStreamRuntime(c) runtime := newStreamRuntime(c)
tlsCert, err := newTLSCert()
if err != nil {
return nil, errors.Wrap(err, "failed to generate tls certificate for stream server")
}
config.TLSConfig = &tls.Config{
Certificates: []tls.Certificate{tlsCert},
InsecureSkipVerify: true,
}
return streaming.NewServer(config, runtime) return streaming.NewServer(config, runtime)
} }
type streamRuntime struct { type streamRuntime struct {
c *criContainerdService c *criService
} }
func newStreamRuntime(c *criContainerdService) streaming.Runtime { func newStreamRuntime(c *criService) streaming.Runtime {
return &streamRuntime{c: c} return &streamRuntime{c: c}
} }
@ -66,13 +92,13 @@ func (s *streamRuntime) Exec(containerID string, cmd []string, stdin io.Reader,
resize: resize, resize: resize,
}) })
if err != nil { if err != nil {
return fmt.Errorf("failed to exec in container: %v", err) return errors.Wrap(err, "failed to exec in container")
} }
if *exitCode == 0 { if *exitCode == 0 {
return nil return nil
} }
return &exec.CodeExitError{ return &exec.CodeExitError{
Err: fmt.Errorf("error executing command %v, exit code %d", cmd, *exitCode), Err: errors.Errorf("error executing command %v, exit code %d", cmd, *exitCode),
Code: int(*exitCode), Code: int(*exitCode),
} }
} }
@ -84,7 +110,7 @@ func (s *streamRuntime) Attach(containerID string, in io.Reader, out, err io.Wri
func (s *streamRuntime) PortForward(podSandboxID string, port int32, stream io.ReadWriteCloser) error { func (s *streamRuntime) PortForward(podSandboxID string, port int32, stream io.ReadWriteCloser) error {
if port <= 0 || port > math.MaxUint16 { if port <= 0 || port > math.MaxUint16 {
return fmt.Errorf("invalid port %d", port) return errors.Errorf("invalid port %d", port)
} }
return s.c.portForward(podSandboxID, port, stream) return s.c.portForward(podSandboxID, port, stream)
} }
@ -112,3 +138,87 @@ func handleResizing(resize <-chan remotecommand.TerminalSize, resizeFunc func(si
} }
}() }()
} }
// newTLSCert returns a tls.certificate loaded from a newly generated
// x509certificate from a newly generated rsa public/private key pair. The
// x509certificate is self signed.
// TODO (mikebrow): replace / rewrite this function to support using CA
// signing of the cetificate. Requires a security plan for kubernetes regarding
// CRI connections / streaming, etc. For example, kubernetes could configure or
// require a CA service and pass a configuration down through CRI.
func newTLSCert() (tls.Certificate, error) {
fail := func(err error) (tls.Certificate, error) { return tls.Certificate{}, err }
var years = 1 // duration of certificate
// Generate new private key
privKey, err := rsa.GenerateKey(rand.Reader, 2048)
if err != nil {
return fail(errors.Wrap(err, "private key cannot be created"))
}
// Generate pem block using the private key
keyPem := pem.EncodeToMemory(&pem.Block{
Type: k8scert.RSAPrivateKeyBlockType,
Bytes: x509.MarshalPKCS1PrivateKey(privKey),
})
// Generate a new random serial number for certificate
serialNumber, err := rand.Int(rand.Reader, new(big.Int).Lsh(big.NewInt(1), 128))
if err != nil {
return fail(errors.Wrap(err, "failed to generate serial number"))
}
hostName, err := os.Hostname()
if err != nil {
return fail(errors.Wrap(err, "failed to get hostname"))
}
addrs, err := net.InterfaceAddrs()
if err != nil {
return fail(errors.Wrap(err, "failed to get host IP addresses"))
}
// Configure and create new certificate
tml := x509.Certificate{
NotBefore: time.Now(),
NotAfter: time.Now().AddDate(years, 0, 0),
SerialNumber: serialNumber,
Subject: pkix.Name{
CommonName: fmt.Sprintf("%s:%s:%s", certOrganizationName, certCommonName, hostName),
Organization: []string{certOrganizationName},
},
BasicConstraintsValid: true,
}
for _, addr := range addrs {
var ip net.IP
switch v := addr.(type) {
case *net.IPNet:
ip = v.IP
case *net.IPAddr:
ip = v.IP
default:
continue
}
tml.IPAddresses = append(tml.IPAddresses, ip)
tml.DNSNames = append(tml.DNSNames, ip.String())
}
cert, err := x509.CreateCertificate(rand.Reader, &tml, &tml, &privKey.PublicKey, privKey)
if err != nil {
return fail(errors.Wrap(err, "certificate cannot be created"))
}
// Generate a pem block with the certificate
certPem := pem.EncodeToMemory(&pem.Block{
Type: k8scert.CertificateBlockType,
Bytes: cert,
})
// Load the tls certificate
tlsCert, err := tls.X509KeyPair(certPem, keyPem)
if err != nil {
return fail(errors.Wrap(err, "certificate could not be loaded"))
}
return tlsCert, nil
}

View File

@ -24,6 +24,6 @@ import (
// UpdateRuntimeConfig updates the runtime config. Currently only handles podCIDR updates. // UpdateRuntimeConfig updates the runtime config. Currently only handles podCIDR updates.
// TODO(random-liu): Figure out how to handle pod cidr in the cri plugin. // TODO(random-liu): Figure out how to handle pod cidr in the cri plugin.
func (c *criContainerdService) UpdateRuntimeConfig(ctx context.Context, r *runtime.UpdateRuntimeConfigRequest) (*runtime.UpdateRuntimeConfigResponse, error) { func (c *criService) UpdateRuntimeConfig(ctx context.Context, r *runtime.UpdateRuntimeConfigRequest) (*runtime.UpdateRuntimeConfigResponse, error) {
return &runtime.UpdateRuntimeConfigResponse{}, nil return &runtime.UpdateRuntimeConfigResponse{}, nil
} }

View File

@ -32,7 +32,7 @@ const (
) )
// Version returns the runtime name, runtime version and runtime API version. // Version returns the runtime name, runtime version and runtime API version.
func (c *criContainerdService) Version(ctx context.Context, r *runtime.VersionRequest) (*runtime.VersionResponse, error) { func (c *criService) Version(ctx context.Context, r *runtime.VersionRequest) (*runtime.VersionResponse, error) {
return &runtime.VersionResponse{ return &runtime.VersionResponse{
Version: kubeAPIVersion, Version: kubeAPIVersion,
RuntimeName: containerName, RuntimeName: containerName,

View File

@ -18,8 +18,8 @@ package container
import ( import (
"encoding/json" "encoding/json"
"fmt"
"github.com/pkg/errors"
runtime "k8s.io/kubernetes/pkg/kubelet/apis/cri/runtime/v1alpha2" runtime "k8s.io/kubernetes/pkg/kubelet/apis/cri/runtime/v1alpha2"
) )
@ -80,5 +80,5 @@ func (c *Metadata) UnmarshalJSON(data []byte) error {
*c = Metadata(versioned.Metadata) *c = Metadata(versioned.Metadata)
return nil return nil
} }
return fmt.Errorf("unsupported version: %q", versioned.Version) return errors.Errorf("unsupported version: %q", versioned.Version)
} }

View File

@ -18,13 +18,13 @@ package container
import ( import (
"encoding/json" "encoding/json"
"fmt"
"io/ioutil" "io/ioutil"
"os" "os"
"path/filepath" "path/filepath"
"sync" "sync"
"github.com/docker/docker/pkg/ioutils" "github.com/docker/docker/pkg/ioutils"
"github.com/pkg/errors"
runtime "k8s.io/kubernetes/pkg/kubelet/apis/cri/runtime/v1alpha2" runtime "k8s.io/kubernetes/pkg/kubelet/apis/cri/runtime/v1alpha2"
) )
@ -95,7 +95,7 @@ func (s *Status) decode(data []byte) error {
*s = versioned.Status *s = versioned.Status
return nil return nil
} }
return fmt.Errorf("unsupported version") return errors.New("unsupported version")
} }
// UpdateFunc is function used to update the container status. If there // UpdateFunc is function used to update the container status. If there
@ -125,11 +125,11 @@ type StatusStorage interface {
func StoreStatus(root, id string, status Status) (StatusStorage, error) { func StoreStatus(root, id string, status Status) (StatusStorage, error) {
data, err := status.encode() data, err := status.encode()
if err != nil { if err != nil {
return nil, fmt.Errorf("failed to encode status: %v", err) return nil, errors.Wrap(err, "failed to encode status")
} }
path := filepath.Join(root, "status") path := filepath.Join(root, "status")
if err := ioutils.AtomicWriteFile(path, data, 0600); err != nil { if err := ioutils.AtomicWriteFile(path, data, 0600); err != nil {
return nil, fmt.Errorf("failed to checkpoint status to %q: %v", path, err) return nil, errors.Wrapf(err, "failed to checkpoint status to %q", path)
} }
return &statusStorage{ return &statusStorage{
path: path, path: path,
@ -143,11 +143,11 @@ func LoadStatus(root, id string) (Status, error) {
path := filepath.Join(root, "status") path := filepath.Join(root, "status")
data, err := ioutil.ReadFile(path) data, err := ioutil.ReadFile(path)
if err != nil { if err != nil {
return Status{}, fmt.Errorf("failed to read status from %q: %v", path, err) return Status{}, errors.Wrapf(err, "failed to read status from %q", path)
} }
var status Status var status Status
if err := status.decode(data); err != nil { if err := status.decode(data); err != nil {
return Status{}, fmt.Errorf("failed to decode status %q: %v", data, err) return Status{}, errors.Wrapf(err, "failed to decode status %q", data)
} }
return status, nil return status, nil
} }
@ -175,10 +175,10 @@ func (s *statusStorage) UpdateSync(u UpdateFunc) error {
} }
data, err := newStatus.encode() data, err := newStatus.encode()
if err != nil { if err != nil {
return fmt.Errorf("failed to encode status: %v", err) return errors.Wrap(err, "failed to encode status")
} }
if err := ioutils.AtomicWriteFile(s.path, data, 0600); err != nil { if err := ioutils.AtomicWriteFile(s.path, data, 0600); err != nil {
return fmt.Errorf("failed to checkpoint status to %q: %v", s.path, err) return errors.Wrapf(err, "failed to checkpoint status to %q", s.path)
} }
s.status = newStatus s.status = newStatus
return nil return nil

View File

@ -18,8 +18,8 @@ package sandbox
import ( import (
"encoding/json" "encoding/json"
"fmt"
"github.com/pkg/errors"
runtime "k8s.io/kubernetes/pkg/kubelet/apis/cri/runtime/v1alpha2" runtime "k8s.io/kubernetes/pkg/kubelet/apis/cri/runtime/v1alpha2"
) )
@ -76,5 +76,5 @@ func (c *Metadata) UnmarshalJSON(data []byte) error {
*c = Metadata(versioned.Metadata) *c = Metadata(versioned.Metadata)
return nil return nil
} }
return fmt.Errorf("unsupported version: %q", versioned.Version) return errors.Errorf("unsupported version: %q", versioned.Version)
} }

View File

@ -17,15 +17,15 @@ limitations under the License.
package sandbox package sandbox
import ( import (
"errors"
"fmt"
"os" "os"
"sync" "sync"
cnins "github.com/containernetworking/plugins/pkg/ns" cnins "github.com/containernetworking/plugins/pkg/ns"
"github.com/docker/docker/pkg/mount"
"github.com/docker/docker/pkg/symlink" "github.com/docker/docker/pkg/symlink"
"github.com/pkg/errors"
"golang.org/x/sys/unix" "golang.org/x/sys/unix"
osinterface "github.com/containerd/cri/pkg/os"
) )
// ErrClosedNetNS is the error returned when network namespace is closed. // ErrClosedNetNS is the error returned when network namespace is closed.
@ -43,7 +43,7 @@ type NetNS struct {
func NewNetNS() (*NetNS, error) { func NewNetNS() (*NetNS, error) {
netns, err := cnins.NewNS() netns, err := cnins.NewNS()
if err != nil { if err != nil {
return nil, fmt.Errorf("failed to setup network namespace %v", err) return nil, errors.Wrap(err, "failed to setup network namespace")
} }
n := new(NetNS) n := new(NetNS)
n.ns = netns n.ns = netns
@ -63,7 +63,7 @@ func LoadNetNS(path string) (*NetNS, error) {
os.RemoveAll(path) // nolint: errcheck os.RemoveAll(path) // nolint: errcheck
return nil, ErrClosedNetNS return nil, ErrClosedNetNS
} }
return nil, fmt.Errorf("failed to load network namespace %v", err) return nil, errors.Wrap(err, "failed to load network namespace")
} }
return &NetNS{ns: ns, restored: true}, nil return &NetNS{ns: ns, restored: true}, nil
} }
@ -76,36 +76,28 @@ func (n *NetNS) Remove() error {
if !n.closed { if !n.closed {
err := n.ns.Close() err := n.ns.Close()
if err != nil { if err != nil {
return fmt.Errorf("failed to close network namespace: %v", err) return errors.Wrap(err, "failed to close network namespace")
} }
n.closed = true n.closed = true
} }
if n.restored { if n.restored {
path := n.ns.Path() path := n.ns.Path()
// TODO(random-liu): Add util function for unmount.
// Check netns existence. // Check netns existence.
if _, err := os.Stat(path); err != nil { if _, err := os.Stat(path); err != nil {
if os.IsNotExist(err) { if os.IsNotExist(err) {
return nil return nil
} }
return fmt.Errorf("failed to stat netns: %v", err) return errors.Wrap(err, "failed to stat netns")
} }
path, err := symlink.FollowSymlinkInScope(path, "/") path, err := symlink.FollowSymlinkInScope(path, "/")
if err != nil { if err != nil {
return fmt.Errorf("failed to follow symlink: %v", err) return errors.Wrap(err, "failed to follow symlink")
}
mounted, err := mount.Mounted(path)
if err != nil {
return fmt.Errorf("failed to check netns mounted: %v", err)
}
if mounted {
err := unix.Unmount(path, unix.MNT_DETACH)
if err != nil && !os.IsNotExist(err) {
return fmt.Errorf("failed to umount netns: %v", err)
} }
if err := osinterface.Unmount(path, unix.MNT_DETACH); err != nil && !os.IsNotExist(err) {
return errors.Wrap(err, "failed to umount netns")
} }
if err := os.RemoveAll(path); err != nil { if err := os.RemoveAll(path); err != nil {
return fmt.Errorf("failed to remove netns: %v", err) return errors.Wrap(err, "failed to remove netns")
} }
n.restored = false n.restored = false
} }

View File

@ -18,24 +18,25 @@ package util
import ( import (
"encoding/json" "encoding/json"
"fmt"
"github.com/pkg/errors"
) )
// DeepCopy makes a deep copy from src into dst. // DeepCopy makes a deep copy from src into dst.
func DeepCopy(dst interface{}, src interface{}) error { func DeepCopy(dst interface{}, src interface{}) error {
if dst == nil { if dst == nil {
return fmt.Errorf("dst cannot be nil") return errors.New("dst cannot be nil")
} }
if src == nil { if src == nil {
return fmt.Errorf("src cannot be nil") return errors.New("src cannot be nil")
} }
bytes, err := json.Marshal(src) bytes, err := json.Marshal(src)
if err != nil { if err != nil {
return fmt.Errorf("unable to marshal src: %s", err) return errors.Wrap(err, "unable to marshal src")
} }
err = json.Unmarshal(bytes, dst) err = json.Unmarshal(bytes, dst)
if err != nil { if err != nil {
return fmt.Errorf("unable to unmarshal into dst: %s", err) return errors.Wrap(err, "unable to unmarshal into dst")
} }
return nil return nil
} }

View File

@ -3,16 +3,16 @@ github.com/blang/semver v3.1.0
github.com/boltdb/bolt e9cf4fae01b5a8ff89d0ec6b32f0d9c9f79aefdd github.com/boltdb/bolt e9cf4fae01b5a8ff89d0ec6b32f0d9c9f79aefdd
github.com/BurntSushi/toml a368813c5e648fee92e5f6c30e3944ff9d5e8895 github.com/BurntSushi/toml a368813c5e648fee92e5f6c30e3944ff9d5e8895
github.com/containerd/cgroups fe281dd265766145e943a034aa41086474ea6130 github.com/containerd/cgroups fe281dd265766145e943a034aa41086474ea6130
github.com/containerd/console 84eeaae905fa414d03e07bcd6c8d3f19e7cf180e github.com/containerd/console cb7008ab3d8359b78c5f464cb7cf160107ad5925
github.com/containerd/containerd 3013762fc58941e33ba70e8f8d9256911f134124 github.com/containerd/containerd 8a7e17ef96678507a4b23d2bc66e5bbe5b50ad37
github.com/containerd/continuity d8fb8589b0e8e85b8c8bbaa8840226d0dfeb7371 github.com/containerd/continuity 3e8f2ea4b190484acb976a5b378d373429639a1a
github.com/containerd/fifo fbfb6a11ec671efbe94ad1c12c2e98773f19e1e6 github.com/containerd/fifo 3d5202aec260678c48179c56f40e6f38a095738c
github.com/containerd/go-runc 4f6e87ae043f859a38255247b49c9abc262d002f github.com/containerd/go-runc bcb223a061a3dd7de1a89c0b402a60f4dd9bd307
github.com/containerd/go-cni f2d7272f12d045b16ed924f50e91f9f9cecc55a7
github.com/containerd/typeurl f6943554a7e7e88b3c14aad190bf05932da84788 github.com/containerd/typeurl f6943554a7e7e88b3c14aad190bf05932da84788
github.com/containernetworking/cni v0.6.0 github.com/containernetworking/cni v0.6.0
github.com/containernetworking/plugins v0.6.0 github.com/containernetworking/plugins v0.7.0
github.com/coreos/go-systemd 48702e0da86bd25e76cfef347e2adeb434a0d0a6 github.com/coreos/go-systemd 48702e0da86bd25e76cfef347e2adeb434a0d0a6
github.com/cri-o/ocicni 9b451e26eb7c694d564991fbf44f77d0afb9b03c
github.com/davecgh/go-spew v1.1.0 github.com/davecgh/go-spew v1.1.0
github.com/docker/distribution b38e5838b7b2f2ad48e06ec4b500011976080621 github.com/docker/distribution b38e5838b7b2f2ad48e06ec4b500011976080621
github.com/docker/docker 86f080cff0914e9694068ed78d503701667c4c00 github.com/docker/docker 86f080cff0914e9694068ed78d503701667c4c00
@ -21,7 +21,6 @@ github.com/docker/go-metrics 4ea375f7759c82740c893fc030bc37088d2ec098
github.com/docker/go-units v0.3.1 github.com/docker/go-units v0.3.1
github.com/docker/spdystream 449fdfce4d962303d702fec724ef0ad181c92528 github.com/docker/spdystream 449fdfce4d962303d702fec724ef0ad181c92528
github.com/emicklei/go-restful ff4f55a206334ef123e4f79bbf348980da81ca46 github.com/emicklei/go-restful ff4f55a206334ef123e4f79bbf348980da81ca46
github.com/fsnotify/fsnotify 7d7316ed6e1ed2de075aab8dfc76de5d158d66e1
github.com/ghodss/yaml 73d445a93680fa1a78ae23a5839bad48f32ba1ee github.com/ghodss/yaml 73d445a93680fa1a78ae23a5839bad48f32ba1ee
github.com/godbus/dbus c7fdd8b5cd55e87b4e1f4e372cdb1db61dd6c66f github.com/godbus/dbus c7fdd8b5cd55e87b4e1f4e372cdb1db61dd6c66f
github.com/gogo/protobuf v0.5 github.com/gogo/protobuf v0.5
@ -31,14 +30,13 @@ github.com/google/gofuzz 44d81051d367757e1c7c6a5a86423ece9afcf63c
github.com/grpc-ecosystem/go-grpc-prometheus 6b7015e65d366bf3f19b2b2a000a831940f0f7e0 github.com/grpc-ecosystem/go-grpc-prometheus 6b7015e65d366bf3f19b2b2a000a831940f0f7e0
github.com/hashicorp/errwrap 7554cd9344cec97297fa6649b055a8c98c2a1e55 github.com/hashicorp/errwrap 7554cd9344cec97297fa6649b055a8c98c2a1e55
github.com/hashicorp/go-multierror ed905158d87462226a13fe39ddf685ea65f1c11f github.com/hashicorp/go-multierror ed905158d87462226a13fe39ddf685ea65f1c11f
github.com/inconshreveable/mousetrap 76626ae9c91c4f2a10f34cad8ce83ea42c93bb75
github.com/json-iterator/go 1.0.4 github.com/json-iterator/go 1.0.4
github.com/matttproud/golang_protobuf_extensions v1.0.0 github.com/matttproud/golang_protobuf_extensions v1.0.0
github.com/Microsoft/go-winio v0.4.5 github.com/Microsoft/go-winio v0.4.5
github.com/Microsoft/hcsshim v0.6.7 github.com/Microsoft/hcsshim v0.6.7
github.com/opencontainers/go-digest 21dfd564fd89c944783d00d069f33e3e7123c448 github.com/opencontainers/go-digest 21dfd564fd89c944783d00d069f33e3e7123c448
github.com/opencontainers/image-spec v1.0.1 github.com/opencontainers/image-spec v1.0.1
github.com/opencontainers/runc a618ab5a0186905949ee463dbb762c3d23e12a80 github.com/opencontainers/runc 69663f0bd4b60df09991c08812a60108003fa340
github.com/opencontainers/runtime-spec v1.0.1 github.com/opencontainers/runtime-spec v1.0.1
github.com/opencontainers/runtime-tools 6073aff4ac61897f75895123f7e24135204a404d github.com/opencontainers/runtime-tools 6073aff4ac61897f75895123f7e24135204a404d
github.com/opencontainers/selinux 4a2974bf1ee960774ffd517717f1f45325af0206 github.com/opencontainers/selinux 4a2974bf1ee960774ffd517717f1f45325af0206
@ -48,10 +46,8 @@ github.com/prometheus/client_golang f4fb1b73fb099f396a7f0036bf86aa8def4ed823
github.com/prometheus/client_model 99fa1f4be8e564e8a6b613da7fa6f46c9edafc6c github.com/prometheus/client_model 99fa1f4be8e564e8a6b613da7fa6f46c9edafc6c
github.com/prometheus/common 89604d197083d4781071d3c65855d24ecfb0a563 github.com/prometheus/common 89604d197083d4781071d3c65855d24ecfb0a563
github.com/prometheus/procfs cb4147076ac75738c9a7d279075a253c0cc5acbd github.com/prometheus/procfs cb4147076ac75738c9a7d279075a253c0cc5acbd
github.com/renstrom/dedent 020d11c3b9c0c7a3c2efcc8e5cf5b9ef7bcea21f
github.com/seccomp/libseccomp-golang 32f571b70023028bd57d9288c20efbcb237f3ce0 github.com/seccomp/libseccomp-golang 32f571b70023028bd57d9288c20efbcb237f3ce0
github.com/sirupsen/logrus v1.0.0 github.com/sirupsen/logrus v1.0.0
github.com/spf13/cobra v0.0.1
github.com/spf13/pflag v1.0.0 github.com/spf13/pflag v1.0.0
github.com/stevvooe/ttrpc d4528379866b0ce7e9d71f3eb96f0582fc374577 github.com/stevvooe/ttrpc d4528379866b0ce7e9d71f3eb96f0582fc374577
github.com/stretchr/testify v1.1.4 github.com/stretchr/testify v1.1.4
@ -63,13 +59,14 @@ golang.org/x/sync 450f422ab23cf9881c94e2db30cac0eb1b7cf80c
golang.org/x/sys 314a259e304ff91bd6985da2a7149bbf91237993 https://github.com/golang/sys golang.org/x/sys 314a259e304ff91bd6985da2a7149bbf91237993 https://github.com/golang/sys
golang.org/x/text 19e51611da83d6be54ddafce4a4af510cb3e9ea4 golang.org/x/text 19e51611da83d6be54ddafce4a4af510cb3e9ea4
golang.org/x/time f51c12702a4d776e4c1fa9b0fabab841babae631 golang.org/x/time f51c12702a4d776e4c1fa9b0fabab841babae631
golang.org/x/crypto 49796115aa4b964c318aad4f3084fdb41e9aa067
google.golang.org/genproto d80a6e20e776b0b17a324d0ba1ab50a39c8e8944 google.golang.org/genproto d80a6e20e776b0b17a324d0ba1ab50a39c8e8944
google.golang.org/grpc v1.7.4 google.golang.org/grpc v1.7.4
gopkg.in/inf.v0 3887ee99ecf07df5b447e9b00d9c0b2adaa9f3e4 gopkg.in/inf.v0 3887ee99ecf07df5b447e9b00d9c0b2adaa9f3e4
gopkg.in/yaml.v2 53feefa2559fb8dfa8d81baad31be332c97d6c77 gopkg.in/yaml.v2 53feefa2559fb8dfa8d81baad31be332c97d6c77
k8s.io/api a1d6dce6736a6c75929bb75111e89077e35a5856 k8s.io/api 5584376ceeffeb13a2e98b5e9f0e9dab37de4bab
k8s.io/apimachinery 8259d997cf059cd83dc47e5f8074b7a7d7967c09 k8s.io/apimachinery fcb9a12f7875d01f8390b28faedc37dcf2e713b9
k8s.io/apiserver 8e45eac9dff86447a5c2effe6a3d2cba70121ebf k8s.io/apiserver 837069aa36757a586e4a8165f1ff5ca06170aa4a
k8s.io/client-go 33bd23f75b6de861994706a322b0afab824b2171 k8s.io/client-go 484f27892430b961df38fe6715cc396409207d9f
k8s.io/kubernetes 05944b1d2ca7f60b09762a330425108f48f6b603 k8s.io/kubernetes v1.10.0-rc.1
k8s.io/utils 258e2a2fa64568210fbd6267cf1d8fd87c3cb86e k8s.io/utils 258e2a2fa64568210fbd6267cf1d8fd87c3cb86e

View File

@ -1,4 +1,3 @@
Apache License Apache License
Version 2.0, January 2004 Version 2.0, January 2004
http://www.apache.org/licenses/ http://www.apache.org/licenses/
@ -176,7 +175,18 @@
END OF TERMS AND CONDITIONS END OF TERMS AND CONDITIONS
Copyright 2016 Red Hat, Inc. APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "[]"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright [yyyy] [name of copyright owner]
Licensed under the Apache License, Version 2.0 (the "License"); Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License. you may not use this file except in compliance with the License.

46
vendor/github.com/containerd/go-cni/README.md generated vendored Normal file
View File

@ -0,0 +1,46 @@
# go-cni
A generic CNI library to provide APIs for CNI plugin interactions. The library provides APIs to:
- Setup networks for container namespace
- Remove networks from container namespace
- Query status of CNI network plugin initialization
go-cni aims to support plugins that implement [Container Network Interface](https://github.com/containernetworking/cni)
## Usage
```
func main() {
id := "123456"
netns := "/proc/9999/ns/net"
defaultIfName := "eth0"
// Initialize library
l = gocni.New(gocni.WithMinNetworkCount(2),
gocni.WithLoNetwork(),
gocni.WithPluginConfDir("/etc/mycni/net.d"),
gocni.WithPluginDir([]string{"/opt/mycni/bin", "/opt/cni/bin"}),
gocni.WithDefaultIfName(defaultIfName))
// Setup network for namespace.
labels := map[string]string{
"K8S_POD_NAMESPACE": "namespace1",
"K8S_POD_NAME": "pod1",
"K8S_POD_INFRA_CONTAINER_ID": id,
}
result, err := l.Setup(id, netns, gocni.WithLabels(labels))
if err != nil {
return nil, fmt.Errorf("failed to setup network for namespace %q: %v", id, err)
}
defer func() {
if retErr != nil {
// Teardown network if an error is returned.
if err := l.Remove(id, netns, gocni.WithLabels(labels)); err != nil {
fmt.Errorf("Failed to destroy network for namespace %q", id)
}
}
}()
// Get IP of the default interface
IP := result.Interfaces[defaultIfName].IPConfigs[0].IP.String()
fmt.Printf("IP of the default interface %s:%s", defaultIfName, IP)
}
```

141
vendor/github.com/containerd/go-cni/cni.go generated vendored Normal file
View File

@ -0,0 +1,141 @@
/*
Copyright The containerd Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package cni
import (
"fmt"
"sync"
cnilibrary "github.com/containernetworking/cni/libcni"
"github.com/containernetworking/cni/pkg/types/current"
"github.com/pkg/errors"
)
type CNI interface {
// Setup setup the network for the namespace
Setup(id string, path string, opts ...NamespaceOpts) (*CNIResult, error)
// Remove tears down the network of the namespace.
Remove(id string, path string, opts ...NamespaceOpts) error
// Load loads the cni network config
Load(opts ...LoadOption) error
// Status checks the status of the cni initialization
Status() error
}
type libcni struct {
config
cniConfig cnilibrary.CNI
networkCount int // minimum network plugin configurations needed to initialize cni
networks []*Network
sync.RWMutex
}
func defaultCNIConfig() *libcni {
return &libcni{
config: config{
pluginDirs: []string{DefaultCNIDir},
pluginConfDir: DefaultNetDir,
prefix: DefaultPrefix,
},
cniConfig: &cnilibrary.CNIConfig{
Path: []string{DefaultCNIDir},
},
networkCount: 1,
}
}
func New(config ...ConfigOption) (CNI, error) {
cni := defaultCNIConfig()
var err error
for _, c := range config {
if err = c(cni); err != nil {
return nil, err
}
}
return cni, nil
}
func (c *libcni) Load(opts ...LoadOption) error {
var err error
// Reset the networks on a load operation to ensure
// config happens on a clean slate
c.reset()
for _, o := range opts {
if err = o(c); err != nil {
return errors.Wrapf(ErrLoad, fmt.Sprintf("cni config load failed: %v", err))
}
}
return c.Status()
}
func (c *libcni) Status() error {
c.RLock()
defer c.RUnlock()
if len(c.networks) < c.networkCount {
return ErrCNINotInitialized
}
return nil
}
// Setup setups the network in the namespace
func (c *libcni) Setup(id string, path string, opts ...NamespaceOpts) (*CNIResult, error) {
if err:=c.Status();err!=nil{
return nil,err
}
ns, err := newNamespace(id, path, opts...)
if err != nil {
return nil, err
}
var results []*current.Result
c.RLock()
defer c.RUnlock()
for _, network := range c.networks {
r, err := network.Attach(ns)
if err != nil {
return nil, err
}
results = append(results, r)
}
return c.GetCNIResultFromResults(results)
}
// Remove removes the network config from the namespace
func (c *libcni) Remove(id string, path string, opts ...NamespaceOpts) error {
if err:=c.Status();err!=nil{
return err
}
ns, err := newNamespace(id, path, opts...)
if err != nil {
return err
}
c.RLock()
defer c.RUnlock()
for _, network := range c.networks {
if err := network.Remove(ns); err != nil {
return err
}
}
return nil
}
func (c *libcni) reset() {
c.Lock()
defer c.Unlock()
c.networks = nil
}

39
vendor/github.com/containerd/go-cni/errors.go generated vendored Normal file
View File

@ -0,0 +1,39 @@
package cni
import (
"github.com/pkg/errors"
)
var (
ErrCNINotInitialized = errors.New("cni plugin not initialized")
ErrInvalidConfig = errors.New("invalid cni config")
ErrNotFound = errors.New("not found")
ErrRead = errors.New("failed to read config file")
ErrInvalidResult = errors.New("invalid result")
ErrLoad = errors.New("failed to load cni config")
)
// IsCNINotInitialized returns true if the error is due cni config not being intialized
func IsCNINotInitialized(err error) bool {
return errors.Cause(err) == ErrCNINotInitialized
}
// IsInvalidConfig returns true if the error is invalid cni config
func IsInvalidConfig(err error) bool {
return errors.Cause(err) == ErrInvalidConfig
}
// IsNotFound returns true if the error is due to a missing config or result
func IsNotFound(err error) bool {
return errors.Cause(err) == ErrNotFound
}
// IsReadFailure return true if the error is a config read failure
func IsReadFailure(err error) bool {
return errors.Cause(err) == ErrRead
}
// IsInvalidResult returns true if the error is due to an invalid cni result
func IsInvalidResult(err error) bool {
return errors.Cause(err) == ErrInvalidResult
}
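A hedged sketch of how a caller might branch on the predicates above after a failed Load; the function name and the returned messages are assumptions for illustration:
package cniexample
import (
	gocni "github.com/containerd/go-cni"
)
// classifyLoadError maps a Load failure onto the sentinel categories
// exported by the go-cni package.
func classifyLoadError(err error) string {
	switch {
	case gocni.IsCNINotInitialized(err):
		return "no cni network config found yet; keep retrying"
	case gocni.IsInvalidConfig(err):
		return "cni config present but unparsable; operator action needed"
	case gocni.IsReadFailure(err):
		return "could not read the cni config directory; transient"
	default:
		return "unrecognized cni error"
	}
}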

41
vendor/github.com/containerd/go-cni/helper.go generated vendored Normal file
View File

@ -0,0 +1,41 @@
/*
Copyright The containerd Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package cni
import (
"fmt"
"github.com/containernetworking/cni/pkg/types/current"
)
func validateInterfaceConfig(ipConf *current.IPConfig, ifs int) error {
if ipConf == nil {
return fmt.Errorf("invalid IP configuration")
}
if ipConf.Interface != nil && *ipConf.Interface > ifs {
return fmt.Errorf("invalid IP configuration with invalid interface %d", *ipConf.Interface)
}
return nil
}
func getIfName(prefix string, i int) string {
return fmt.Sprintf("%s%d", prefix, i)
}
func defaultInterface(prefix string) string {
return getIfName(prefix, 0)
}

75
vendor/github.com/containerd/go-cni/namespace.go generated vendored Normal file
View File

@ -0,0 +1,75 @@
/*
Copyright The containerd Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package cni
import (
cnilibrary "github.com/containernetworking/cni/libcni"
"github.com/containernetworking/cni/pkg/types/current"
)
type Network struct {
cni cnilibrary.CNI
config *cnilibrary.NetworkConfigList
ifName string
}
func (n *Network) Attach(ns *Namespace) (*current.Result, error) {
r, err := n.cni.AddNetworkList(n.config, ns.config(n.ifName))
if err != nil {
return nil, err
}
return current.NewResultFromResult(r)
}
func (n *Network) Remove(ns *Namespace) error {
return n.cni.DelNetworkList(n.config, ns.config(n.ifName))
}
type Namespace struct {
id string
path string
capabilityArgs map[string]interface{}
args map[string]string
}
func newNamespace(id, path string, opts ...NamespaceOpts) (*Namespace, error) {
ns := &Namespace{
id: id,
path: path,
capabilityArgs: make(map[string]interface{}),
args: make(map[string]string),
}
for _, o := range opts {
if err := o(ns); err != nil {
return nil, err
}
}
return ns, nil
}
func (ns *Namespace) config(ifName string) *cnilibrary.RuntimeConf {
c := &cnilibrary.RuntimeConf{
ContainerID: ns.id,
NetNS: ns.path,
IfName: ifName,
}
for k, v := range ns.args {
c.Args = append(c.Args, [2]string{k, v})
}
c.CapabilityArgs = ns.capabilityArgs
return c
}

58
vendor/github.com/containerd/go-cni/namespace_opts.go generated vendored Normal file
View File

@ -0,0 +1,58 @@
/*
Copyright The containerd Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package cni
type NamespaceOpts func(s *Namespace) error
// Capabilities
func WithCapabilityPortMap(portMapping []PortMapping) NamespaceOpts {
return func(c *Namespace) error {
c.capabilityArgs["portMappings"] = portMapping
return nil
}
}
func WithCapabilityIPRanges(ipRanges []IPRanges) NamespaceOpts {
return func(c *Namespace) error {
c.capabilityArgs["ipRanges"] = ipRanges
return nil
}
}
func WithCapability(name string, capability interface{}) NamespaceOpts {
return func(c *Namespace) error {
c.capabilityArgs[name] = capability
return nil
}
}
// Args
func WithLabels(labels map[string]string) NamespaceOpts {
return func(c *Namespace) error {
for k, v := range labels {
c.args[k] = v
}
return nil
}
}
func WithArgs(k, v string) NamespaceOpts {
return func(c *Namespace) error {
c.args[k] = v
return nil
}
}
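A short example of passing the per-namespace options above to Setup; the labels, port mapping values, id, and netns path are made up for illustration:
package cniexample
import (
	gocni "github.com/containerd/go-cni"
)
// setupWithOpts attaches a sandbox to the loaded networks with pod labels
// and a port mapping forwarded to plugins that support the capability.
func setupWithOpts(l gocni.CNI) (*gocni.CNIResult, error) {
	return l.Setup("example-id", "/var/run/netns/example-ns",
		gocni.WithLabels(map[string]string{
			"IgnoreUnknown":     "1",
			"K8S_POD_NAMESPACE": "default",
		}),
		gocni.WithCapabilityPortMap([]gocni.PortMapping{{
			HostPort:      8080,
			ContainerPort: 80,
			Protocol:      "tcp",
		}}),
	)
}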

226
vendor/github.com/containerd/go-cni/opts.go generated vendored Normal file
View File

@ -0,0 +1,226 @@
/*
Copyright The containerd Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package cni
import (
"sort"
"strings"
cnilibrary "github.com/containernetworking/cni/libcni"
"github.com/pkg/errors"
)
type ConfigOption func(c *libcni) error
// WithInterfacePrefix sets the prefix for network interfaces
// e.g. eth or wlan
func WithInterfacePrefix(prefix string) ConfigOption {
return func(c *libcni) error {
c.prefix = prefix
return nil
}
}
// WithPluginDir can be used to set the locations of
// the cni plugin binaries
func WithPluginDir(dirs []string) ConfigOption {
return func(c *libcni) error {
c.pluginDirs = dirs
c.cniConfig = &cnilibrary.CNIConfig{Path: dirs}
return nil
}
}
// WithPluginConfDir can be used to configure the
// cni configuration directory.
func WithPluginConfDir(dir string) ConfigOption {
return func(c *libcni) error {
c.pluginConfDir = dir
return nil
}
}
// WithMinNetworkCount can be used to configure the
// minimum number of networks that must be configured and initialized
// for the status to report success. By default it is 1.
func WithMinNetworkCount(count int) ConfigOption {
return func(c *libcni) error {
c.networkCount = count
return nil
}
}
// LoadOption can be used with Load API
// to load network configuration from different
// sources.
type LoadOption func(c *libcni) error
// WithLoNetwork can be used to load the loopback
// network config.
func WithLoNetwork() LoadOption {
return func(c *libcni) error {
loConfig, _ := cnilibrary.ConfListFromBytes([]byte(`{
"cniVersion": "0.3.1",
"name": "cni-loopback",
"plugins": [{
"type": "loopback"
}]
}`))
c.Lock()
defer c.Unlock()
		c.networks = append(c.networks, &Network{
cni: c.cniConfig,
config: loConfig,
ifName: "lo",
})
return nil
}
}
// WithConf can be used to load config directly
// from byte.
func WithConf(bytes []byte) LoadOption {
return func(c *libcni) error {
conf, err := cnilibrary.ConfFromBytes(bytes)
if err != nil {
return err
}
confList, err := cnilibrary.ConfListFromConf(conf)
if err != nil {
return err
}
c.Lock()
defer c.Unlock()
c.networks = append(c.networks, &Network{
cni: c.cniConfig,
config: confList,
ifName: getIfName(c.prefix, 0),
})
return nil
}
}
// WithConfFile can be used to load network config
// from a .conf file. Only an absolute fileName (full path)
// is supported.
func WithConfFile(fileName string) LoadOption {
return func(c *libcni) error {
conf, err := cnilibrary.ConfFromFile(fileName)
if err != nil {
return err
}
// upconvert to conf list
confList, err := cnilibrary.ConfListFromConf(conf)
if err != nil {
return err
}
c.Lock()
defer c.Unlock()
c.networks = append(c.networks, &Network{
cni: c.cniConfig,
config: confList,
ifName: getIfName(c.prefix, 0),
})
return nil
}
}
// WithConfListFile can be used to load network config
// from a .conflist file. Only an absolute fileName (full path)
// is supported.
func WithConfListFile(fileName string) LoadOption {
return func(c *libcni) error {
confList, err := cnilibrary.ConfListFromFile(fileName)
if err != nil {
return err
}
c.Lock()
defer c.Unlock()
		c.networks = append(c.networks, &Network{
cni: c.cniConfig,
config: confList,
ifName: getIfName(c.prefix, 0),
})
return nil
}
}
// WithDefaultConf can be used to detect network config
// files from the configured cni config directory and load
// them.
func WithDefaultConf() LoadOption {
return func(c *libcni) error {
files, err := cnilibrary.ConfFiles(c.pluginConfDir, []string{".conf", ".conflist", ".json"})
switch {
case err != nil:
return errors.Wrapf(ErrRead, "failed to read config file: %v", err)
case len(files) == 0:
return errors.Wrapf(ErrCNINotInitialized, "no network config found in %s", c.pluginConfDir)
}
// files contains the network config files associated with cni network.
// Use lexicographical way as a defined order for network config files.
sort.Strings(files)
		// Since the CNI spec does not specify a way to detect default networks,
		// the convention chosen is: treat the first network configuration in the
		// sorted list of network conf files as the default network, and use the
		// default interface provided during init as its network interface. Every
		// other network gets a generated interface name.
i := 0
c.Lock()
defer c.Unlock()
for _, confFile := range files {
var confList *cnilibrary.NetworkConfigList
if strings.HasSuffix(confFile, ".conflist") {
confList, err = cnilibrary.ConfListFromFile(confFile)
if err != nil {
return errors.Wrapf(ErrInvalidConfig, "failed to load CNI config list file %s: %v", confFile, err)
}
} else {
conf, err := cnilibrary.ConfFromFile(confFile)
if err != nil {
return errors.Wrapf(ErrInvalidConfig, "failed to load CNI config file %s: %v", confFile, err)
}
// Ensure the config has a "type" so we know what plugin to run.
// Also catches the case where somebody put a conflist into a conf file.
if conf.Network.Type == "" {
return errors.Wrapf(ErrInvalidConfig, "network type not found in %s", confFile)
}
confList, err = cnilibrary.ConfListFromConf(conf)
if err != nil {
return errors.Wrapf(ErrInvalidConfig, "failed to convert CNI config file %s to list: %v", confFile, err)
}
}
if len(confList.Plugins) == 0 {
return errors.Wrapf(ErrInvalidConfig, "CNI config list %s has no networks, skipping", confFile)
}
c.networks = append(c.networks, &Network{
cni: c.cniConfig,
config: confList,
ifName: getIfName(c.prefix, i),
})
i++
}
if len(c.networks) == 0 {
			return errors.Wrapf(ErrCNINotInitialized, "no valid networks found in %s", c.pluginConfDir)
}
return nil
}
}
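A sketch of combining the ConfigOptions and LoadOptions above to load one explicitly chosen .conflist instead of scanning the conf directory; the interface prefix and the parameterized file name are illustrative:
package cniexample
import (
	gocni "github.com/containerd/go-cni"
)
// newFromConfList builds a CNI instance from a single conflist file plus
// the built-in loopback network.
func newFromConfList(conflist string) (gocni.CNI, error) {
	l, err := gocni.New(gocni.WithInterfacePrefix("eth"))
	if err != nil {
		return nil, err
	}
	if err := l.Load(gocni.WithLoNetwork(), gocni.WithConfListFile(conflist)); err != nil {
		return nil, err
	}
	return l, nil
}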

103
vendor/github.com/containerd/go-cni/result.go generated vendored Normal file
View File

@ -0,0 +1,103 @@
/*
Copyright The containerd Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package cni
import (
"net"
"github.com/containernetworking/cni/pkg/types"
"github.com/containernetworking/cni/pkg/types/current"
"github.com/pkg/errors"
)
type IPConfig struct {
IP net.IP
Gateway net.IP
}
type CNIResult struct {
Interfaces map[string]*Config
DNS []types.DNS
Routes []*types.Route
}
type Config struct {
IPConfigs []*IPConfig
Mac string
Sandbox string
}
// GetCNIResultFromResults returns structured data containing the
// interface configuration for each of the interfaces created in the namespace.
// It conforms to the CNI spec's result convention:
// a) Interfaces list. Depending on the plugin, this can include the sandbox
// (eg, container or hypervisor) interface name and/or the host interface
// name, the hardware addresses of each interface, and details about the
// sandbox (if any) the interface is in.
// b) IP configuration assigned to each interface. The IPv4 and/or IPv6 addresses,
// gateways, and routes assigned to sandbox and/or host interfaces.
// c) DNS information. Dictionary that includes DNS information for nameservers,
// domain, search domains and options.
func (c *libcni) GetCNIResultFromResults(results []*current.Result) (*CNIResult, error) {
r := &CNIResult{
Interfaces: make(map[string]*Config),
}
	// Plugins may not return Interfaces in the result if no additional
	// interfaces were created. In that case all IP configurations are
	// applied to the default interface
r.Interfaces[defaultInterface(c.prefix)] = &Config{}
// Walk through all the results
for _, result := range results {
		// Walk through all the interfaces in each result
for _, intf := range result.Interfaces {
r.Interfaces[intf.Name] = &Config{
Mac: intf.Mac,
Sandbox: intf.Sandbox,
}
}
		// Walk through all the IPs in the result and attach them to the
		// corresponding interfaces
for _, ipConf := range result.IPs {
if err := validateInterfaceConfig(ipConf, len(result.Interfaces)); err != nil {
				return nil, errors.Wrapf(ErrInvalidResult, "failed to validate interface config: %v", err)
}
name := c.getInterfaceName(result.Interfaces, ipConf)
r.Interfaces[name].IPConfigs = append(r.Interfaces[name].IPConfigs,
&IPConfig{IP: ipConf.Address.IP, Gateway: ipConf.Gateway})
}
r.DNS = append(r.DNS, result.DNS)
r.Routes = append(r.Routes, result.Routes...)
}
if _, ok := r.Interfaces[defaultInterface(c.prefix)]; !ok {
return nil, errors.Wrapf(ErrNotFound, "default network not found")
}
return r, nil
}
// getInterfaceName returns the interface name if the plugin
// returned a result with associated interfaces. If the interface
// is not present, the default interface name is used
func (c *libcni) getInterfaceName(interfaces []*current.Interface,
ipConf *current.IPConfig) string {
if ipConf.Interface != nil {
return interfaces[*ipConf.Interface].Name
}
return defaultInterface(c.prefix)
}
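A hedged example of consuming a CNIResult; assuming the default "eth" prefix, the first (default) interface is named eth0:
package cniexample
import (
	"fmt"

	gocni "github.com/containerd/go-cni"
)
// defaultIP returns the first IP assigned to the default interface of a
// Setup result, or an error if none was configured.
func defaultIP(r *gocni.CNIResult) (string, error) {
	cfg, ok := r.Interfaces["eth0"]
	if !ok || len(cfg.IPConfigs) == 0 {
		return "", fmt.Errorf("no IP configured on the default interface")
	}
	return cfg.IPConfigs[0].IP.String(), nil
}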

78
vendor/github.com/containerd/go-cni/testutils.go generated vendored Normal file
View File

@ -0,0 +1,78 @@
/*
Copyright The containerd Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package cni
import (
"fmt"
"io/ioutil"
"os"
"path"
"testing"
)
func makeTmpDir(prefix string) (string, error) {
tmpDir, err := ioutil.TempDir(os.TempDir(), prefix)
if err != nil {
return "", err
}
return tmpDir, nil
}
func makeFakeCNIConfig(t *testing.T) (string, string) {
cniDir, err := makeTmpDir("fakecni")
if err != nil {
t.Fatalf("Failed to create plugin config dir: %v", err)
}
cniConfDir := path.Join(cniDir, "net.d")
err = os.MkdirAll(cniConfDir, 0777)
if err != nil {
t.Fatalf("Failed to create network config dir: %v", err)
}
networkConfig1 := path.Join(cniConfDir, "mocknetwork1.conf")
f1, err := os.Create(networkConfig1)
if err != nil {
t.Fatalf("Failed to create network config %v: %v", f1, err)
}
networkConfig2 := path.Join(cniConfDir, "mocknetwork2.conf")
f2, err := os.Create(networkConfig2)
if err != nil {
t.Fatalf("Failed to create network config %v: %v", f2, err)
}
cfg1 := fmt.Sprintf(`{ "name": "%s", "type": "%s", "capabilities": {"portMappings": true} }`, "plugin1", "fakecni")
_, err = f1.WriteString(cfg1)
if err != nil {
t.Fatalf("Failed to write network config file %v: %v", f1, err)
}
f1.Close()
cfg2 := fmt.Sprintf(`{ "name": "%s", "type": "%s", "capabilities": {"portMappings": true} }`, "plugin2", "fakecni")
_, err = f2.WriteString(cfg2)
if err != nil {
t.Fatalf("Failed to write network config file %v: %v", f2, err)
}
f2.Close()
return cniDir, cniConfDir
}
func tearDownCNIConfig(t *testing.T, confDir string) {
err := os.RemoveAll(confDir)
if err != nil {
t.Fatalf("Failed to cleanup CNI configs: %v", err)
}
}
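An illustrative in-package test built on the fixtures above; the test itself is not part of the vendored code, only a sketch of how the helpers are meant to be used:
package cni
import "testing"
// TestLoadFakeConf loads the fake configs written by makeFakeCNIConfig
// through WithDefaultConf and expects Load to succeed.
func TestLoadFakeConf(t *testing.T) {
	cniDir, confDir := makeFakeCNIConfig(t)
	defer tearDownCNIConfig(t, cniDir)

	l := defaultCNIConfig()
	l.pluginConfDir = confDir
	if err := l.Load(WithDefaultConf()); err != nil {
		t.Fatalf("expected fake configs to load, got: %v", err)
	}
}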

45
vendor/github.com/containerd/go-cni/types.go generated vendored Normal file
View File

@ -0,0 +1,45 @@
/*
Copyright The containerd Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package cni
const (
CNIPluginName = "cni"
DefaultNetDir = "/etc/cni/net.d"
DefaultCNIDir = "/opt/cni/bin"
VendorCNIDirTemplate = "%s/opt/%s/bin"
DefaultPrefix = "eth"
)
type config struct {
pluginDirs []string
pluginConfDir string
prefix string
}
type PortMapping struct {
HostPort int32
ContainerPort int32
Protocol string
HostIP string
}
type IPRanges struct {
Subnet string
RangeStart string
RangeEnd string
Gateway string
}

6
vendor/github.com/containerd/go-cni/vendor.conf generated vendored Normal file
View File

@ -0,0 +1,6 @@
github.com/stretchr/testify b89eecf5ca5db6d3ba60b237ffe3df7bafb7662f
github.com/davecgh/go-spew 8991bc29aa16c548c550c7ff78260e27b9ab7c73
github.com/pmezard/go-difflib 792786c7400a136282c1664665ae0a8db921c6c2
github.com/stretchr/objx 8a3f7159479fbc75b30357fbc48f380b7320f08e
github.com/containernetworking/cni 142cde0c766cd6055cc7fdfdcb44579c0c9c35bf
github.com/pkg/errors v0.8.0

View File

@ -1,4 +1,5 @@
[![Build Status](https://travis-ci.org/containernetworking/plugins.svg?branch=master)](https://travis-ci.org/containernetworking/plugins) [![Linux Build Status](https://travis-ci.org/containernetworking/plugins.svg?branch=master)](https://travis-ci.org/containernetworking/plugins)
[![Windows Build Status](https://ci.appveyor.com/api/projects/status/kcuubx0chr76ev86/branch/master?svg=true)](https://ci.appveyor.com/project/cni-bot/plugins/branch/master)
# plugins # plugins
Some CNI network plugins, maintained by the containernetworking team. For more information, see the individual READMEs. Some CNI network plugins, maintained by the containernetworking team. For more information, see the individual READMEs.

View File

@ -1,178 +0,0 @@
// Copyright 2015 CNI authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package ns
import (
"fmt"
"os"
"runtime"
"sync"
"syscall"
)
type NetNS interface {
// Executes the passed closure in this object's network namespace,
// attempting to restore the original namespace before returning.
// However, since each OS thread can have a different network namespace,
// and Go's thread scheduling is highly variable, callers cannot
// guarantee any specific namespace is set unless operations that
// require that namespace are wrapped with Do(). Also, no code called
// from Do() should call runtime.UnlockOSThread(), or the risk
// of executing code in an incorrect namespace will be greater. See
// https://github.com/golang/go/wiki/LockOSThread for further details.
Do(toRun func(NetNS) error) error
// Sets the current network namespace to this object's network namespace.
// Note that since Go's thread scheduling is highly variable, callers
// cannot guarantee the requested namespace will be the current namespace
// after this function is called; to ensure this wrap operations that
// require the namespace with Do() instead.
Set() error
// Returns the filesystem path representing this object's network namespace
Path() string
// Returns a file descriptor representing this object's network namespace
Fd() uintptr
// Cleans up this instance of the network namespace; if this instance
// is the last user the namespace will be destroyed
Close() error
}
type netNS struct {
file *os.File
mounted bool
closed bool
}
// netNS implements the NetNS interface
var _ NetNS = &netNS{}
const (
// https://github.com/torvalds/linux/blob/master/include/uapi/linux/magic.h
NSFS_MAGIC = 0x6e736673
PROCFS_MAGIC = 0x9fa0
)
type NSPathNotExistErr struct{ msg string }
func (e NSPathNotExistErr) Error() string { return e.msg }
type NSPathNotNSErr struct{ msg string }
func (e NSPathNotNSErr) Error() string { return e.msg }
func IsNSorErr(nspath string) error {
stat := syscall.Statfs_t{}
if err := syscall.Statfs(nspath, &stat); err != nil {
if os.IsNotExist(err) {
err = NSPathNotExistErr{msg: fmt.Sprintf("failed to Statfs %q: %v", nspath, err)}
} else {
err = fmt.Errorf("failed to Statfs %q: %v", nspath, err)
}
return err
}
switch stat.Type {
case PROCFS_MAGIC, NSFS_MAGIC:
return nil
default:
return NSPathNotNSErr{msg: fmt.Sprintf("unknown FS magic on %q: %x", nspath, stat.Type)}
}
}
// Returns an object representing the namespace referred to by @path
func GetNS(nspath string) (NetNS, error) {
err := IsNSorErr(nspath)
if err != nil {
return nil, err
}
fd, err := os.Open(nspath)
if err != nil {
return nil, err
}
return &netNS{file: fd}, nil
}
func (ns *netNS) Path() string {
return ns.file.Name()
}
func (ns *netNS) Fd() uintptr {
return ns.file.Fd()
}
func (ns *netNS) errorIfClosed() error {
if ns.closed {
return fmt.Errorf("%q has already been closed", ns.file.Name())
}
return nil
}
func (ns *netNS) Do(toRun func(NetNS) error) error {
if err := ns.errorIfClosed(); err != nil {
return err
}
containedCall := func(hostNS NetNS) error {
threadNS, err := GetCurrentNS()
if err != nil {
return fmt.Errorf("failed to open current netns: %v", err)
}
defer threadNS.Close()
// switch to target namespace
if err = ns.Set(); err != nil {
return fmt.Errorf("error switching to ns %v: %v", ns.file.Name(), err)
}
defer threadNS.Set() // switch back
return toRun(hostNS)
}
// save a handle to current network namespace
hostNS, err := GetCurrentNS()
if err != nil {
return fmt.Errorf("Failed to open current namespace: %v", err)
}
defer hostNS.Close()
var wg sync.WaitGroup
wg.Add(1)
var innerError error
go func() {
defer wg.Done()
runtime.LockOSThread()
innerError = containedCall(hostNS)
}()
wg.Wait()
return innerError
}
// WithNetNSPath executes the passed closure under the given network
// namespace, restoring the original namespace afterwards.
func WithNetNSPath(nspath string, toRun func(NetNS) error) error {
ns, err := GetNS(nspath)
if err != nil {
return err
}
defer ns.Close()
return ns.Do(toRun)
}

View File

@ -21,6 +21,7 @@ import (
"path" "path"
"runtime" "runtime"
"sync" "sync"
"syscall"
"golang.org/x/sys/unix" "golang.org/x/sys/unix"
) )
@ -147,3 +148,158 @@ func (ns *netNS) Set() error {
return nil return nil
} }
type NetNS interface {
// Executes the passed closure in this object's network namespace,
// attempting to restore the original namespace before returning.
// However, since each OS thread can have a different network namespace,
// and Go's thread scheduling is highly variable, callers cannot
// guarantee any specific namespace is set unless operations that
// require that namespace are wrapped with Do(). Also, no code called
// from Do() should call runtime.UnlockOSThread(), or the risk
// of executing code in an incorrect namespace will be greater. See
// https://github.com/golang/go/wiki/LockOSThread for further details.
Do(toRun func(NetNS) error) error
// Sets the current network namespace to this object's network namespace.
// Note that since Go's thread scheduling is highly variable, callers
// cannot guarantee the requested namespace will be the current namespace
// after this function is called; to ensure this wrap operations that
// require the namespace with Do() instead.
Set() error
// Returns the filesystem path representing this object's network namespace
Path() string
// Returns a file descriptor representing this object's network namespace
Fd() uintptr
// Cleans up this instance of the network namespace; if this instance
// is the last user the namespace will be destroyed
Close() error
}
type netNS struct {
file *os.File
mounted bool
closed bool
}
// netNS implements the NetNS interface
var _ NetNS = &netNS{}
const (
// https://github.com/torvalds/linux/blob/master/include/uapi/linux/magic.h
NSFS_MAGIC = 0x6e736673
PROCFS_MAGIC = 0x9fa0
)
type NSPathNotExistErr struct{ msg string }
func (e NSPathNotExistErr) Error() string { return e.msg }
type NSPathNotNSErr struct{ msg string }
func (e NSPathNotNSErr) Error() string { return e.msg }
func IsNSorErr(nspath string) error {
stat := syscall.Statfs_t{}
if err := syscall.Statfs(nspath, &stat); err != nil {
if os.IsNotExist(err) {
err = NSPathNotExistErr{msg: fmt.Sprintf("failed to Statfs %q: %v", nspath, err)}
} else {
err = fmt.Errorf("failed to Statfs %q: %v", nspath, err)
}
return err
}
switch stat.Type {
case PROCFS_MAGIC, NSFS_MAGIC:
return nil
default:
return NSPathNotNSErr{msg: fmt.Sprintf("unknown FS magic on %q: %x", nspath, stat.Type)}
}
}
// Returns an object representing the namespace referred to by @path
func GetNS(nspath string) (NetNS, error) {
err := IsNSorErr(nspath)
if err != nil {
return nil, err
}
fd, err := os.Open(nspath)
if err != nil {
return nil, err
}
return &netNS{file: fd}, nil
}
func (ns *netNS) Path() string {
return ns.file.Name()
}
func (ns *netNS) Fd() uintptr {
return ns.file.Fd()
}
func (ns *netNS) errorIfClosed() error {
if ns.closed {
return fmt.Errorf("%q has already been closed", ns.file.Name())
}
return nil
}
func (ns *netNS) Do(toRun func(NetNS) error) error {
if err := ns.errorIfClosed(); err != nil {
return err
}
containedCall := func(hostNS NetNS) error {
threadNS, err := GetCurrentNS()
if err != nil {
return fmt.Errorf("failed to open current netns: %v", err)
}
defer threadNS.Close()
// switch to target namespace
if err = ns.Set(); err != nil {
return fmt.Errorf("error switching to ns %v: %v", ns.file.Name(), err)
}
defer threadNS.Set() // switch back
return toRun(hostNS)
}
// save a handle to current network namespace
hostNS, err := GetCurrentNS()
if err != nil {
return fmt.Errorf("Failed to open current namespace: %v", err)
}
defer hostNS.Close()
var wg sync.WaitGroup
wg.Add(1)
var innerError error
go func() {
defer wg.Done()
runtime.LockOSThread()
innerError = containedCall(hostNS)
}()
wg.Wait()
return innerError
}
// WithNetNSPath executes the passed closure under the given network
// namespace, restoring the original namespace afterwards.
func WithNetNSPath(nspath string, toRun func(NetNS) error) error {
ns, err := GetNS(nspath)
if err != nil {
return err
}
defer ns.Close()
return ns.Do(toRun)
}
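A brief sketch of the intended use of WithNetNSPath from a consumer of this package; the namespace path and the work done inside the closure are placeholders:
package example
import (
	"net"

	"github.com/containernetworking/plugins/pkg/ns"
)
// interfacesInNS lists the network interfaces visible inside the network
// namespace at nspath; the closure runs on a locked OS thread switched
// into that namespace.
func interfacesInNS(nspath string) ([]net.Interface, error) {
	var ifaces []net.Interface
	err := ns.WithNetNSPath(nspath, func(_ ns.NetNS) error {
		var listErr error
		ifaces, listErr = net.Interfaces()
		return listErr
	})
	return ifaces, err
}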

View File

@ -1,36 +0,0 @@
// Copyright 2015-2017 CNI authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// +build !linux
package ns
import "github.com/containernetworking/cni/pkg/types"
// Returns an object representing the current OS thread's network namespace
func GetCurrentNS() (NetNS, error) {
return nil, types.NotImplementedError
}
func NewNS() (NetNS, error) {
return nil, types.NotImplementedError
}
func (ns *netNS) Close() error {
return types.NotImplementedError
}
func (ns *netNS) Set() error {
return types.NotImplementedError
}

View File

@ -1,3 +0,0 @@
# ocicni
API layer to call the CNI plugins from an OCI lifecycle daemon

View File

@ -1,425 +0,0 @@
package ocicni
import (
"errors"
"fmt"
"os"
"os/exec"
"sort"
"strings"
"sync"
"github.com/containernetworking/cni/libcni"
cnitypes "github.com/containernetworking/cni/pkg/types"
"github.com/fsnotify/fsnotify"
"github.com/sirupsen/logrus"
)
type cniNetworkPlugin struct {
loNetwork *cniNetwork
sync.RWMutex
defaultNetwork *cniNetwork
nsenterPath string
pluginDir string
cniDirs []string
vendorCNIDirPrefix string
monitorNetDirChan chan struct{}
// The pod map provides synchronization for a given pod's network
// operations. Each pod's setup/teardown/status operations
// are synchronized against each other, but network operations of other
// pods can proceed in parallel.
podsLock sync.Mutex
pods map[string]*podLock
}
type cniNetwork struct {
name string
NetworkConfig *libcni.NetworkConfigList
CNIConfig libcni.CNI
}
var errMissingDefaultNetwork = errors.New("Missing CNI default network")
type podLock struct {
// Count of in-flight operations for this pod; when this reaches zero
// the lock can be removed from the pod map
refcount uint
// Lock to synchronize operations for this specific pod
mu sync.Mutex
}
func buildFullPodName(podNetwork PodNetwork) string {
return podNetwork.Namespace + "_" + podNetwork.Name
}
// Lock network operations for a specific pod. If that pod is not yet in
// the pod map, it will be added. The reference count for the pod will
// be increased.
func (plugin *cniNetworkPlugin) podLock(podNetwork PodNetwork) *sync.Mutex {
plugin.podsLock.Lock()
defer plugin.podsLock.Unlock()
fullPodName := buildFullPodName(podNetwork)
lock, ok := plugin.pods[fullPodName]
if !ok {
lock = &podLock{}
plugin.pods[fullPodName] = lock
}
lock.refcount++
return &lock.mu
}
// Unlock network operations for a specific pod. The reference count for the
// pod will be decreased. If the reference count reaches zero, the pod will be
// removed from the pod map.
func (plugin *cniNetworkPlugin) podUnlock(podNetwork PodNetwork) {
plugin.podsLock.Lock()
defer plugin.podsLock.Unlock()
fullPodName := buildFullPodName(podNetwork)
lock, ok := plugin.pods[fullPodName]
if !ok {
logrus.Warningf("Unbalanced pod lock unref for %s", fullPodName)
return
} else if lock.refcount == 0 {
// This should never ever happen, but handle it anyway
delete(plugin.pods, fullPodName)
logrus.Errorf("Pod lock for %s still in map with zero refcount", fullPodName)
return
}
lock.refcount--
lock.mu.Unlock()
if lock.refcount == 0 {
delete(plugin.pods, fullPodName)
}
}
func (plugin *cniNetworkPlugin) monitorNetDir() {
watcher, err := fsnotify.NewWatcher()
if err != nil {
logrus.Errorf("could not create new watcher %v", err)
return
}
defer watcher.Close()
if err = watcher.Add(plugin.pluginDir); err != nil {
logrus.Errorf("Failed to add watch on %q: %v", plugin.pluginDir, err)
return
}
// Now that `watcher` is running and watching the `pluginDir`
// gather the initial configuration, before starting the
// goroutine which will actually process events. It has to be
// done in this order to avoid missing any updates which might
// otherwise occur between gathering the initial configuration
// and starting the watcher.
if err := plugin.syncNetworkConfig(); err != nil {
logrus.Infof("Initial CNI setting failed, continue monitoring: %v", err)
} else {
logrus.Infof("Initial CNI setting succeeded")
}
go func() {
for {
select {
case event := <-watcher.Events:
logrus.Debugf("CNI monitoring event %v", event)
if event.Op&fsnotify.Create != fsnotify.Create &&
event.Op&fsnotify.Write != fsnotify.Write {
continue
}
if err = plugin.syncNetworkConfig(); err == nil {
logrus.Infof("CNI asynchronous setting succeeded")
continue
}
logrus.Errorf("CNI setting failed, continue monitoring: %v", err)
case err := <-watcher.Errors:
logrus.Errorf("CNI monitoring error %v", err)
close(plugin.monitorNetDirChan)
return
}
}
}()
<-plugin.monitorNetDirChan
}
// InitCNI takes the plugin directory and CNI directories where the CNI config
// files should be searched for. If no valid CNI configs exist, network requests
// will fail until valid CNI config files are present in the config directory.
func InitCNI(pluginDir string, cniDirs ...string) (CNIPlugin, error) {
vendorCNIDirPrefix := ""
plugin := &cniNetworkPlugin{
defaultNetwork: nil,
loNetwork: getLoNetwork(cniDirs, vendorCNIDirPrefix),
pluginDir: pluginDir,
cniDirs: cniDirs,
vendorCNIDirPrefix: vendorCNIDirPrefix,
monitorNetDirChan: make(chan struct{}),
pods: make(map[string]*podLock),
}
var err error
plugin.nsenterPath, err = exec.LookPath("nsenter")
if err != nil {
return nil, err
}
// Ensure plugin directory exists, because the following monitoring logic
// relies on that.
if err := os.MkdirAll(pluginDir, 0755); err != nil {
return nil, err
}
go plugin.monitorNetDir()
return plugin, nil
}
func getDefaultCNINetwork(pluginDir string, cniDirs []string, vendorCNIDirPrefix string) (*cniNetwork, error) {
if pluginDir == "" {
pluginDir = DefaultNetDir
}
if len(cniDirs) == 0 {
cniDirs = []string{DefaultCNIDir}
}
files, err := libcni.ConfFiles(pluginDir, []string{".conf", ".conflist", ".json"})
switch {
case err != nil:
return nil, err
case len(files) == 0:
return nil, errMissingDefaultNetwork
}
sort.Strings(files)
for _, confFile := range files {
var confList *libcni.NetworkConfigList
if strings.HasSuffix(confFile, ".conflist") {
confList, err = libcni.ConfListFromFile(confFile)
if err != nil {
logrus.Warningf("Error loading CNI config list file %s: %v", confFile, err)
continue
}
} else {
conf, err := libcni.ConfFromFile(confFile)
if err != nil {
logrus.Warningf("Error loading CNI config file %s: %v", confFile, err)
continue
}
if conf.Network.Type == "" {
logrus.Warningf("Error loading CNI config file %s: no 'type'; perhaps this is a .conflist?", confFile)
continue
}
confList, err = libcni.ConfListFromConf(conf)
if err != nil {
logrus.Warningf("Error converting CNI config file %s to list: %v", confFile, err)
continue
}
}
if len(confList.Plugins) == 0 {
logrus.Warningf("CNI config list %s has no networks, skipping", confFile)
continue
}
logrus.Infof("CNI network %s (type=%v) is used from %s", confList.Name, confList.Plugins[0].Network.Type, confFile)
// Search for vendor-specific plugins as well as default plugins in the CNI codebase.
vendorDir := vendorCNIDir(vendorCNIDirPrefix, confList.Plugins[0].Network.Type)
cninet := &libcni.CNIConfig{
Path: append(cniDirs, vendorDir),
}
network := &cniNetwork{name: confList.Name, NetworkConfig: confList, CNIConfig: cninet}
return network, nil
}
return nil, fmt.Errorf("No valid networks found in %s", pluginDir)
}
func vendorCNIDir(prefix, pluginType string) string {
return fmt.Sprintf(VendorCNIDirTemplate, prefix, pluginType)
}
func getLoNetwork(cniDirs []string, vendorDirPrefix string) *cniNetwork {
if len(cniDirs) == 0 {
cniDirs = []string{DefaultCNIDir}
}
loConfig, err := libcni.ConfListFromBytes([]byte(`{
"cniVersion": "0.2.0",
"name": "cni-loopback",
"plugins": [{
"type": "loopback"
}]
}`))
if err != nil {
// The hardcoded config above should always be valid and unit tests will
// catch this
panic(err)
}
vendorDir := vendorCNIDir(vendorDirPrefix, loConfig.Plugins[0].Network.Type)
cninet := &libcni.CNIConfig{
Path: append(cniDirs, vendorDir),
}
loNetwork := &cniNetwork{
name: "lo",
NetworkConfig: loConfig,
CNIConfig: cninet,
}
return loNetwork
}
func (plugin *cniNetworkPlugin) syncNetworkConfig() error {
network, err := getDefaultCNINetwork(plugin.pluginDir, plugin.cniDirs, plugin.vendorCNIDirPrefix)
if err != nil {
logrus.Errorf("error updating cni config: %s", err)
return err
}
plugin.setDefaultNetwork(network)
return nil
}
func (plugin *cniNetworkPlugin) getDefaultNetwork() *cniNetwork {
plugin.RLock()
defer plugin.RUnlock()
return plugin.defaultNetwork
}
func (plugin *cniNetworkPlugin) setDefaultNetwork(n *cniNetwork) {
plugin.Lock()
defer plugin.Unlock()
plugin.defaultNetwork = n
}
func (plugin *cniNetworkPlugin) checkInitialized() error {
if plugin.getDefaultNetwork() == nil {
return errors.New("cni config uninitialized")
}
return nil
}
func (plugin *cniNetworkPlugin) Name() string {
return CNIPluginName
}
func (plugin *cniNetworkPlugin) SetUpPod(podNetwork PodNetwork) (cnitypes.Result, error) {
if err := plugin.checkInitialized(); err != nil {
return nil, err
}
plugin.podLock(podNetwork).Lock()
defer plugin.podUnlock(podNetwork)
_, err := plugin.loNetwork.addToNetwork(podNetwork)
if err != nil {
logrus.Errorf("Error while adding to cni lo network: %s", err)
return nil, err
}
result, err := plugin.getDefaultNetwork().addToNetwork(podNetwork)
if err != nil {
logrus.Errorf("Error while adding to cni network: %s", err)
return nil, err
}
return result, err
}
func (plugin *cniNetworkPlugin) TearDownPod(podNetwork PodNetwork) error {
if err := plugin.checkInitialized(); err != nil {
return err
}
plugin.podLock(podNetwork).Lock()
defer plugin.podUnlock(podNetwork)
return plugin.getDefaultNetwork().deleteFromNetwork(podNetwork)
}
// TODO: Use the addToNetwork function to obtain the IP of the Pod. That will assume idempotent ADD call to the plugin.
// Also fix the runtime's call to Status function to be done only in the case that the IP is lost, no need to do periodic calls
func (plugin *cniNetworkPlugin) GetPodNetworkStatus(podNetwork PodNetwork) (string, error) {
plugin.podLock(podNetwork).Lock()
defer plugin.podUnlock(podNetwork)
ip, err := getContainerIP(plugin.nsenterPath, podNetwork.NetNS, DefaultInterfaceName, "-4")
if err != nil {
ip, err = getContainerIP(plugin.nsenterPath, podNetwork.NetNS, DefaultInterfaceName, "-6")
}
if err != nil {
return "", err
}
return ip.String(), nil
}
func (network *cniNetwork) addToNetwork(podNetwork PodNetwork) (cnitypes.Result, error) {
rt, err := buildCNIRuntimeConf(podNetwork)
if err != nil {
logrus.Errorf("Error adding network: %v", err)
return nil, err
}
netconf, cninet := network.NetworkConfig, network.CNIConfig
logrus.Infof("About to add CNI network %s (type=%v)", netconf.Name, netconf.Plugins[0].Network.Type)
res, err := cninet.AddNetworkList(netconf, rt)
if err != nil {
logrus.Errorf("Error adding network: %v", err)
return nil, err
}
return res, nil
}
func (network *cniNetwork) deleteFromNetwork(podNetwork PodNetwork) error {
rt, err := buildCNIRuntimeConf(podNetwork)
if err != nil {
logrus.Errorf("Error deleting network: %v", err)
return err
}
netconf, cninet := network.NetworkConfig, network.CNIConfig
logrus.Infof("About to del CNI network %s (type=%v)", netconf.Name, netconf.Plugins[0].Network.Type)
err = cninet.DelNetworkList(netconf, rt)
if err != nil {
logrus.Errorf("Error deleting network: %v", err)
return err
}
return nil
}
func buildCNIRuntimeConf(podNetwork PodNetwork) (*libcni.RuntimeConf, error) {
logrus.Infof("Got pod network %+v", podNetwork)
rt := &libcni.RuntimeConf{
ContainerID: podNetwork.ID,
NetNS: podNetwork.NetNS,
IfName: DefaultInterfaceName,
Args: [][2]string{
{"IgnoreUnknown", "1"},
{"K8S_POD_NAMESPACE", podNetwork.Namespace},
{"K8S_POD_NAME", podNetwork.Name},
{"K8S_POD_INFRA_CONTAINER_ID", podNetwork.ID},
},
}
if len(podNetwork.PortMappings) == 0 {
return rt, nil
}
rt.CapabilityArgs = map[string]interface{}{
"portMappings": podNetwork.PortMappings,
}
return rt, nil
}
func (plugin *cniNetworkPlugin) Status() error {
return plugin.checkInitialized()
}

View File

@ -1,66 +0,0 @@
package ocicni
import (
"github.com/containernetworking/cni/pkg/types"
)
const (
// DefaultInterfaceName is the string to be used for the interface name inside the net namespace
DefaultInterfaceName = "eth0"
// CNIPluginName is the default name of the plugin
CNIPluginName = "cni"
// DefaultNetDir is the place to look for CNI Network
DefaultNetDir = "/etc/cni/net.d"
// DefaultCNIDir is the place to look for cni config files
DefaultCNIDir = "/opt/cni/bin"
// VendorCNIDirTemplate is the template for looking up vendor specific cni config/executable files
VendorCNIDirTemplate = "%s/opt/%s/bin"
)
// PortMapping maps to the standard CNI portmapping Capability
// see: https://github.com/containernetworking/cni/blob/master/CONVENTIONS.md
type PortMapping struct {
// HostPort is the port number on the host.
HostPort int32 `json:"hostPort"`
// ContainerPort is the port number inside the sandbox.
ContainerPort int32 `json:"containerPort"`
// Protocol is the protocol of the port mapping.
Protocol string `json:"protocol"`
// HostIP is the host ip to use.
HostIP string `json:"hostIP"`
}
// PodNetwork configures the network of a pod sandbox.
type PodNetwork struct {
// Name is the name of the sandbox.
Name string
// Namespace is the namespace of the sandbox.
Namespace string
// ID is the id of the sandbox container.
ID string
// NetNS is the network namespace path of the sandbox.
NetNS string
// PortMappings is the port mapping of the sandbox.
PortMappings []PortMapping
}
// CNIPlugin is the interface that needs to be implemented by a plugin
type CNIPlugin interface {
// Name returns the plugin's name. This will be used when searching
// for a plugin by name, e.g.
Name() string
// SetUpPod is the method called after the sandbox container of
// the pod has been created but before the other containers of the
// pod are launched.
SetUpPod(network PodNetwork) (types.Result, error)
// TearDownPod is the method called before a pod's sandbox container will be deleted
TearDownPod(network PodNetwork) error
// Status is the method called to obtain the ipv4 or ipv6 addresses of the pod sandbox
GetPodNetworkStatus(network PodNetwork) (string, error)
// NetworkStatus returns error if the network plugin is in error state
Status() error
}

View File

@ -1,32 +0,0 @@
package ocicni
import (
"fmt"
"net"
"os/exec"
"strings"
)
func getContainerIP(nsenterPath, netnsPath, interfaceName, addrType string) (net.IP, error) {
// Try to retrieve ip inside container network namespace
output, err := exec.Command(nsenterPath, fmt.Sprintf("--net=%s", netnsPath), "-F", "--",
"ip", "-o", addrType, "addr", "show", "dev", interfaceName, "scope", "global").CombinedOutput()
if err != nil {
return nil, fmt.Errorf("Unexpected command output %s with error: %v", output, err)
}
lines := strings.Split(string(output), "\n")
if len(lines) < 1 {
return nil, fmt.Errorf("Unexpected command output %s", output)
}
fields := strings.Fields(lines[0])
if len(fields) < 4 {
return nil, fmt.Errorf("Unexpected address output %s ", lines[0])
}
ip, _, err := net.ParseCIDR(fields[3])
if err != nil {
return nil, fmt.Errorf("CNI failed to parse ip from output %s due to %v", output, err)
}
return ip, nil
}

View File

@ -1,79 +0,0 @@
# File system notifications for Go
[![GoDoc](https://godoc.org/github.com/fsnotify/fsnotify?status.svg)](https://godoc.org/github.com/fsnotify/fsnotify) [![Go Report Card](https://goreportcard.com/badge/github.com/fsnotify/fsnotify)](https://goreportcard.com/report/github.com/fsnotify/fsnotify)
fsnotify utilizes [golang.org/x/sys](https://godoc.org/golang.org/x/sys) rather than `syscall` from the standard library. Ensure you have the latest version installed by running:
```console
go get -u golang.org/x/sys/...
```
Cross platform: Windows, Linux, BSD and macOS.
|Adapter |OS |Status |
|----------|----------|----------|
|inotify |Linux 2.6.27 or later, Android\*|Supported [![Build Status](https://travis-ci.org/fsnotify/fsnotify.svg?branch=master)](https://travis-ci.org/fsnotify/fsnotify)|
|kqueue |BSD, macOS, iOS\*|Supported [![Build Status](https://travis-ci.org/fsnotify/fsnotify.svg?branch=master)](https://travis-ci.org/fsnotify/fsnotify)|
|ReadDirectoryChangesW|Windows|Supported [![Build status](https://ci.appveyor.com/api/projects/status/ivwjubaih4r0udeh/branch/master?svg=true)](https://ci.appveyor.com/project/NathanYoungman/fsnotify/branch/master)|
|FSEvents |macOS |[Planned](https://github.com/fsnotify/fsnotify/issues/11)|
|FEN |Solaris 11 |[In Progress](https://github.com/fsnotify/fsnotify/issues/12)|
|fanotify |Linux 2.6.37+ | |
|USN Journals |Windows |[Maybe](https://github.com/fsnotify/fsnotify/issues/53)|
|Polling |*All* |[Maybe](https://github.com/fsnotify/fsnotify/issues/9)|
\* Android and iOS are untested.
Please see [the documentation](https://godoc.org/github.com/fsnotify/fsnotify) and consult the [FAQ](#faq) for usage information.
## API stability
fsnotify is a fork of [howeyc/fsnotify](https://godoc.org/github.com/howeyc/fsnotify) with a new API as of v1.0. The API is based on [this design document](http://goo.gl/MrYxyA).
All [releases](https://github.com/fsnotify/fsnotify/releases) are tagged based on [Semantic Versioning](http://semver.org/). Further API changes are [planned](https://github.com/fsnotify/fsnotify/milestones), and will be tagged with a new major revision number.
Go 1.6 supports dependencies located in the `vendor/` folder. Unless you are creating a library, it is recommended that you copy fsnotify into `vendor/github.com/fsnotify/fsnotify` within your project, and likewise for `golang.org/x/sys`.
## Contributing
Please refer to [CONTRIBUTING][] before opening an issue or pull request.
## Example
See [example_test.go](https://github.com/fsnotify/fsnotify/blob/master/example_test.go).
## FAQ
**When a file is moved to another directory is it still being watched?**
No (it shouldn't be, unless you are watching where it was moved to).
**When I watch a directory, are all subdirectories watched as well?**
No, you must add watches for any directory you want to watch (a recursive watcher is on the roadmap [#18][]).
**Do I have to watch the Error and Event channels in a separate goroutine?**
As of now, yes. Looking into making this single-thread friendly (see [howeyc #7][#7])
**Why am I receiving multiple events for the same file on OS X?**
Spotlight indexing on OS X can result in multiple events (see [howeyc #62][#62]). A temporary workaround is to add your folder(s) to the *Spotlight Privacy settings* until we have a native FSEvents implementation (see [#11][]).
**How many files can be watched at once?**
There are OS-specific limits as to how many watches can be created:
* Linux: /proc/sys/fs/inotify/max_user_watches contains the limit, reaching this limit results in a "no space left on device" error.
* BSD / OSX: sysctl variables "kern.maxfiles" and "kern.maxfilesperproc", reaching these limits results in a "too many open files" error.
[#62]: https://github.com/howeyc/fsnotify/issues/62
[#18]: https://github.com/fsnotify/fsnotify/issues/18
[#11]: https://github.com/fsnotify/fsnotify/issues/11
[#7]: https://github.com/howeyc/fsnotify/issues/7
[contributing]: https://github.com/fsnotify/fsnotify/blob/master/CONTRIBUTING.md
## Related Projects
* [notify](https://github.com/rjeczalik/notify)
* [fsevents](https://github.com/fsnotify/fsevents)

View File

@ -1,37 +0,0 @@
// Copyright 2010 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build solaris
package fsnotify
import (
"errors"
)
// Watcher watches a set of files, delivering events to a channel.
type Watcher struct {
Events chan Event
Errors chan error
}
// NewWatcher establishes a new watcher with the underlying OS and begins waiting for events.
func NewWatcher() (*Watcher, error) {
return nil, errors.New("FEN based watcher not yet supported for fsnotify\n")
}
// Close removes all watches and closes the events channel.
func (w *Watcher) Close() error {
return nil
}
// Add starts watching the named file or directory (non-recursively).
func (w *Watcher) Add(name string) error {
return nil
}
// Remove stops watching the named file or directory (non-recursively).
func (w *Watcher) Remove(name string) error {
return nil
}

View File

@ -1,66 +0,0 @@
// Copyright 2012 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build !plan9
// Package fsnotify provides a platform-independent interface for file system notifications.
package fsnotify
import (
"bytes"
"errors"
"fmt"
)
// Event represents a single file system notification.
type Event struct {
Name string // Relative path to the file or directory.
Op Op // File operation that triggered the event.
}
// Op describes a set of file operations.
type Op uint32
// These are the generalized file operations that can trigger a notification.
const (
Create Op = 1 << iota
Write
Remove
Rename
Chmod
)
func (op Op) String() string {
// Use a buffer for efficient string concatenation
var buffer bytes.Buffer
if op&Create == Create {
buffer.WriteString("|CREATE")
}
if op&Remove == Remove {
buffer.WriteString("|REMOVE")
}
if op&Write == Write {
buffer.WriteString("|WRITE")
}
if op&Rename == Rename {
buffer.WriteString("|RENAME")
}
if op&Chmod == Chmod {
buffer.WriteString("|CHMOD")
}
if buffer.Len() == 0 {
return ""
}
return buffer.String()[1:] // Strip leading pipe
}
// String returns a string representation of the event in the form
// "file: REMOVE|WRITE|..."
func (e Event) String() string {
return fmt.Sprintf("%q: %s", e.Name, e.Op.String())
}
// Common errors that can be reported by a watcher
var ErrEventOverflow = errors.New("fsnotify queue overflow")

View File

@ -1,334 +0,0 @@
// Copyright 2010 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build linux
package fsnotify
import (
"errors"
"fmt"
"io"
"os"
"path/filepath"
"strings"
"sync"
"unsafe"
"golang.org/x/sys/unix"
)
// Watcher watches a set of files, delivering events to a channel.
type Watcher struct {
Events chan Event
Errors chan error
mu sync.Mutex // Map access
cv *sync.Cond // sync removing on rm_watch with IN_IGNORE
fd int
poller *fdPoller
watches map[string]*watch // Map of inotify watches (key: path)
paths map[int]string // Map of watched paths (key: watch descriptor)
done chan struct{} // Channel for sending a "quit message" to the reader goroutine
doneResp chan struct{} // Channel to respond to Close
}
// NewWatcher establishes a new watcher with the underlying OS and begins waiting for events.
func NewWatcher() (*Watcher, error) {
// Create inotify fd
fd, errno := unix.InotifyInit1(unix.IN_CLOEXEC)
if fd == -1 {
return nil, errno
}
// Create epoll
poller, err := newFdPoller(fd)
if err != nil {
unix.Close(fd)
return nil, err
}
w := &Watcher{
fd: fd,
poller: poller,
watches: make(map[string]*watch),
paths: make(map[int]string),
Events: make(chan Event),
Errors: make(chan error),
done: make(chan struct{}),
doneResp: make(chan struct{}),
}
w.cv = sync.NewCond(&w.mu)
go w.readEvents()
return w, nil
}
func (w *Watcher) isClosed() bool {
select {
case <-w.done:
return true
default:
return false
}
}
// Close removes all watches and closes the events channel.
func (w *Watcher) Close() error {
if w.isClosed() {
return nil
}
// Send 'close' signal to goroutine, and set the Watcher to closed.
close(w.done)
// Wake up goroutine
w.poller.wake()
// Wait for goroutine to close
<-w.doneResp
return nil
}
// Add starts watching the named file or directory (non-recursively).
func (w *Watcher) Add(name string) error {
name = filepath.Clean(name)
if w.isClosed() {
return errors.New("inotify instance already closed")
}
const agnosticEvents = unix.IN_MOVED_TO | unix.IN_MOVED_FROM |
unix.IN_CREATE | unix.IN_ATTRIB | unix.IN_MODIFY |
unix.IN_MOVE_SELF | unix.IN_DELETE | unix.IN_DELETE_SELF
var flags uint32 = agnosticEvents
w.mu.Lock()
watchEntry, found := w.watches[name]
w.mu.Unlock()
if found {
watchEntry.flags |= flags
flags |= unix.IN_MASK_ADD
}
wd, errno := unix.InotifyAddWatch(w.fd, name, flags)
if wd == -1 {
return errno
}
w.mu.Lock()
w.watches[name] = &watch{wd: uint32(wd), flags: flags}
w.paths[wd] = name
w.mu.Unlock()
return nil
}
// Remove stops watching the named file or directory (non-recursively).
func (w *Watcher) Remove(name string) error {
name = filepath.Clean(name)
// Fetch the watch.
w.mu.Lock()
defer w.mu.Unlock()
watch, ok := w.watches[name]
// Remove it from inotify.
if !ok {
return fmt.Errorf("can't remove non-existent inotify watch for: %s", name)
}
// inotify_rm_watch will return EINVAL if the file has been deleted;
// the inotify will already have been removed.
	// watches and paths are deleted in ignoreLinux() implicitly and asynchronously
// by calling inotify_rm_watch() below. e.g. readEvents() goroutine receives IN_IGNORE
// so that EINVAL means that the wd is being rm_watch()ed or its file removed
// by another thread and we have not received IN_IGNORE event.
success, errno := unix.InotifyRmWatch(w.fd, watch.wd)
if success == -1 {
// TODO: Perhaps it's not helpful to return an error here in every case.
// the only two possible errors are:
// EBADF, which happens when w.fd is not a valid file descriptor of any kind.
// EINVAL, which is when fd is not an inotify descriptor or wd is not a valid watch descriptor.
// Watch descriptors are invalidated when they are removed explicitly or implicitly;
// explicitly by inotify_rm_watch, implicitly when the file they are watching is deleted.
return errno
}
// wait until ignoreLinux() deleting maps
exists := true
for exists {
w.cv.Wait()
_, exists = w.watches[name]
}
return nil
}
type watch struct {
wd uint32 // Watch descriptor (as returned by the inotify_add_watch() syscall)
flags uint32 // inotify flags of this watch (see inotify(7) for the list of valid flags)
}
// readEvents reads from the inotify file descriptor, converts the
// received events into Event objects and sends them via the Events channel
func (w *Watcher) readEvents() {
var (
buf [unix.SizeofInotifyEvent * 4096]byte // Buffer for a maximum of 4096 raw events
n int // Number of bytes read with read()
errno error // Syscall errno
ok bool // For poller.wait
)
defer close(w.doneResp)
defer close(w.Errors)
defer close(w.Events)
defer unix.Close(w.fd)
defer w.poller.close()
for {
// See if we have been closed.
if w.isClosed() {
return
}
ok, errno = w.poller.wait()
if errno != nil {
select {
case w.Errors <- errno:
case <-w.done:
return
}
continue
}
if !ok {
continue
}
n, errno = unix.Read(w.fd, buf[:])
// If a signal interrupted execution, see if we've been asked to close, and try again.
// http://man7.org/linux/man-pages/man7/signal.7.html :
// "Before Linux 3.8, reads from an inotify(7) file descriptor were not restartable"
if errno == unix.EINTR {
continue
}
// unix.Read might have been woken up by Close. If so, we're done.
if w.isClosed() {
return
}
if n < unix.SizeofInotifyEvent {
var err error
if n == 0 {
// If EOF is received. This should really never happen.
err = io.EOF
} else if n < 0 {
// If an error occurred while reading.
err = errno
} else {
// Read was too short.
err = errors.New("notify: short read in readEvents()")
}
select {
case w.Errors <- err:
case <-w.done:
return
}
continue
}
var offset uint32
// We don't know how many events we just read into the buffer
// While the offset points to at least one whole event...
for offset <= uint32(n-unix.SizeofInotifyEvent) {
// Point "raw" to the event in the buffer
raw := (*unix.InotifyEvent)(unsafe.Pointer(&buf[offset]))
mask := uint32(raw.Mask)
nameLen := uint32(raw.Len)
if mask&unix.IN_Q_OVERFLOW != 0 {
select {
case w.Errors <- ErrEventOverflow:
case <-w.done:
return
}
}
// If the event happened to the watched directory or the watched file, the kernel
			// doesn't append the filename to the event, but we would like to always fill
			// the "Name" field with a valid filename. We retrieve the path of the watch from
// the "paths" map.
w.mu.Lock()
name := w.paths[int(raw.Wd)]
w.mu.Unlock()
if nameLen > 0 {
// Point "bytes" at the first byte of the filename
bytes := (*[unix.PathMax]byte)(unsafe.Pointer(&buf[offset+unix.SizeofInotifyEvent]))
// The filename is padded with NULL bytes. TrimRight() gets rid of those.
name += "/" + strings.TrimRight(string(bytes[0:nameLen]), "\000")
}
event := newEvent(name, mask)
// Send the events that are not ignored on the events channel
if !event.ignoreLinux(w, raw.Wd, mask) {
select {
case w.Events <- event:
case <-w.done:
return
}
}
// Move to the next event in the buffer
offset += unix.SizeofInotifyEvent + nameLen
}
}
}
// Certain types of events can be "ignored" and not sent over the Events
// channel. Such as events marked ignore by the kernel, or MODIFY events
// against files that do not exist.
func (e *Event) ignoreLinux(w *Watcher, wd int32, mask uint32) bool {
// Ignore anything the inotify API says to ignore
if mask&unix.IN_IGNORED == unix.IN_IGNORED {
w.mu.Lock()
defer w.mu.Unlock()
name := w.paths[int(wd)]
delete(w.paths, int(wd))
delete(w.watches, name)
w.cv.Broadcast()
return true
}
// If the event is not a DELETE or RENAME, the file must exist.
// Otherwise the event is ignored.
// *Note*: this was put in place because it was seen that a MODIFY
// event was sent after the DELETE. This ignores that MODIFY and
// assumes a DELETE will come or has come if the file doesn't exist.
if !(e.Op&Remove == Remove || e.Op&Rename == Rename) {
_, statErr := os.Lstat(e.Name)
return os.IsNotExist(statErr)
}
return false
}
// newEvent returns a platform-independent Event based on an inotify mask.
func newEvent(name string, mask uint32) Event {
e := Event{Name: name}
if mask&unix.IN_CREATE == unix.IN_CREATE || mask&unix.IN_MOVED_TO == unix.IN_MOVED_TO {
e.Op |= Create
}
if mask&unix.IN_DELETE_SELF == unix.IN_DELETE_SELF || mask&unix.IN_DELETE == unix.IN_DELETE {
e.Op |= Remove
}
if mask&unix.IN_MODIFY == unix.IN_MODIFY {
e.Op |= Write
}
if mask&unix.IN_MOVE_SELF == unix.IN_MOVE_SELF || mask&unix.IN_MOVED_FROM == unix.IN_MOVED_FROM {
e.Op |= Rename
}
if mask&unix.IN_ATTRIB == unix.IN_ATTRIB {
e.Op |= Chmod
}
return e
}
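Taken together, NewWatcher, Add, and the Events/Errors channels above form the core of the consumer-facing surface of this back end. A minimal usage sketch, assuming the canonical import path github.com/fsnotify/fsnotify and using /tmp purely as an illustrative watch target:

```go
// Minimal consumer sketch for the Watcher shown above (illustrative only).
package main

import (
	"log"

	"github.com/fsnotify/fsnotify"
)

func main() {
	w, err := fsnotify.NewWatcher()
	if err != nil {
		log.Fatal(err)
	}
	defer w.Close()

	// /tmp is only an example path; Add is non-recursive.
	if err := w.Add("/tmp"); err != nil {
		log.Fatal(err)
	}

	// Events and Errors are the two channels fed by readEvents above;
	// both are closed when the watcher shuts down.
	for {
		select {
		case ev, ok := <-w.Events:
			if !ok {
				return
			}
			log.Printf("%v %s", ev.Op, ev.Name)
		case err, ok := <-w.Errors:
			if !ok {
				return
			}
			log.Println("error:", err)
		}
	}
}
```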

View File

@ -1,187 +0,0 @@
// Copyright 2015 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build linux
package fsnotify
import (
"errors"
"golang.org/x/sys/unix"
)
type fdPoller struct {
fd int // File descriptor (as returned by the inotify_init() syscall)
epfd int // Epoll file descriptor
pipe [2]int // Pipe for waking up
}
func emptyPoller(fd int) *fdPoller {
poller := new(fdPoller)
poller.fd = fd
poller.epfd = -1
poller.pipe[0] = -1
poller.pipe[1] = -1
return poller
}
// Create a new inotify poller.
// This creates an inotify handler, and an epoll handler.
func newFdPoller(fd int) (*fdPoller, error) {
var errno error
poller := emptyPoller(fd)
defer func() {
if errno != nil {
poller.close()
}
}()
poller.fd = fd
// Create epoll fd
poller.epfd, errno = unix.EpollCreate1(0)
if poller.epfd == -1 {
return nil, errno
}
// Create pipe; pipe[0] is the read end, pipe[1] the write end.
errno = unix.Pipe2(poller.pipe[:], unix.O_NONBLOCK)
if errno != nil {
return nil, errno
}
// Register inotify fd with epoll
event := unix.EpollEvent{
Fd: int32(poller.fd),
Events: unix.EPOLLIN,
}
errno = unix.EpollCtl(poller.epfd, unix.EPOLL_CTL_ADD, poller.fd, &event)
if errno != nil {
return nil, errno
}
// Register pipe fd with epoll
event = unix.EpollEvent{
Fd: int32(poller.pipe[0]),
Events: unix.EPOLLIN,
}
errno = unix.EpollCtl(poller.epfd, unix.EPOLL_CTL_ADD, poller.pipe[0], &event)
if errno != nil {
return nil, errno
}
return poller, nil
}
// Wait using epoll.
// Returns true if something is ready to be read,
// false if there is not.
func (poller *fdPoller) wait() (bool, error) {
// 3 possible events per fd, and 2 fds, makes a maximum of 6 events.
// It is unclear whether epoll_wait reports the number of events returned
// or the total number of events ready, so the buffer is made one entry
// larger than that maximum to cover both cases.
events := make([]unix.EpollEvent, 7)
for {
n, errno := unix.EpollWait(poller.epfd, events, -1)
if n == -1 {
if errno == unix.EINTR {
continue
}
return false, errno
}
if n == 0 {
// If there are no events, try again.
continue
}
if n > 6 {
// This should never happen. More events were returned than should be possible.
return false, errors.New("epoll_wait returned more events than I know what to do with")
}
ready := events[:n]
epollhup := false
epollerr := false
epollin := false
for _, event := range ready {
if event.Fd == int32(poller.fd) {
if event.Events&unix.EPOLLHUP != 0 {
// This should not happen, but if it does, treat it as a wakeup.
epollhup = true
}
if event.Events&unix.EPOLLERR != 0 {
// If an error is waiting on the file descriptor, we should pretend
// something is ready to read, and let unix.Read pick up the error.
epollerr = true
}
if event.Events&unix.EPOLLIN != 0 {
// There is data to read.
epollin = true
}
}
if event.Fd == int32(poller.pipe[0]) {
if event.Events&unix.EPOLLHUP != 0 {
// The write end of the pipe was closed by us. This means we're
// closing down the watcher, and we should wake up.
}
if event.Events&unix.EPOLLERR != 0 {
// An error is waiting on the pipe file descriptor.
// This is an absolute mystery, and should never ever happen.
return false, errors.New("Error on the pipe descriptor.")
}
if event.Events&unix.EPOLLIN != 0 {
// This is a regular wakeup, so we have to clear the buffer.
err := poller.clearWake()
if err != nil {
return false, err
}
}
}
}
if epollhup || epollerr || epollin {
return true, nil
}
return false, nil
}
}
// Wake up the poller by writing a byte to the write end of the pipe.
func (poller *fdPoller) wake() error {
buf := make([]byte, 1)
n, errno := unix.Write(poller.pipe[1], buf)
if n == -1 {
if errno == unix.EAGAIN {
// Buffer is full, poller will wake.
return nil
}
return errno
}
return nil
}
func (poller *fdPoller) clearWake() error {
// You have to be woken up a LOT in order to get to 100!
buf := make([]byte, 100)
n, errno := unix.Read(poller.pipe[0], buf)
if n == -1 {
if errno == unix.EAGAIN {
// Buffer is empty, someone else cleared our wake.
return nil
}
return errno
}
return nil
}
// Close all poller file descriptors, but not the one passed to it.
func (poller *fdPoller) close() {
if poller.pipe[1] != -1 {
unix.Close(poller.pipe[1])
}
if poller.pipe[0] != -1 {
unix.Close(poller.pipe[0])
}
if poller.epfd != -1 {
unix.Close(poller.epfd)
}
}
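The epoll registration above relies on the classic self-pipe wakeup trick: wake() writes one byte to the pipe's write end, which makes the read end readable and unblocks EpollWait. A stand-alone, Linux-only sketch of that mechanism (not part of the vendored package; it uses the same golang.org/x/sys/unix calls):

```go
// Self-pipe wakeup sketch: epoll blocks on the read end of a non-blocking
// pipe, and a single byte written to the write end unblocks the wait.

// +build linux

package main

import (
	"fmt"
	"time"

	"golang.org/x/sys/unix"
)

func main() {
	epfd, err := unix.EpollCreate1(0)
	if err != nil {
		panic(err)
	}
	defer unix.Close(epfd)

	var pipe [2]int
	if err := unix.Pipe2(pipe[:], unix.O_NONBLOCK); err != nil {
		panic(err)
	}
	defer unix.Close(pipe[0])
	defer unix.Close(pipe[1])

	// Watch the read end of the pipe, exactly as newFdPoller does.
	ev := unix.EpollEvent{Fd: int32(pipe[0]), Events: unix.EPOLLIN}
	if err := unix.EpollCtl(epfd, unix.EPOLL_CTL_ADD, pipe[0], &ev); err != nil {
		panic(err)
	}

	// Wake the poller from another goroutine after a short delay;
	// this write is the equivalent of poller.wake().
	go func() {
		time.Sleep(100 * time.Millisecond)
		unix.Write(pipe[1], []byte{0})
	}()

	events := make([]unix.EpollEvent, 1)
	n, err := unix.EpollWait(epfd, events, -1) // blocks until the write above
	if err != nil {
		panic(err)
	}
	fmt.Println("woke up, events:", n)
}
```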

View File

@ -1,503 +0,0 @@
// Copyright 2010 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build freebsd openbsd netbsd dragonfly darwin
package fsnotify
import (
"errors"
"fmt"
"io/ioutil"
"os"
"path/filepath"
"sync"
"time"
"golang.org/x/sys/unix"
)
// Watcher watches a set of files, delivering events to a channel.
type Watcher struct {
Events chan Event
Errors chan error
done chan bool // Channel for sending a "quit message" to the reader goroutine
kq int // File descriptor (as returned by the kqueue() syscall).
mu sync.Mutex // Protects access to watcher data
watches map[string]int // Map of watched file descriptors (key: path).
externalWatches map[string]bool // Map of watches added by user of the library.
dirFlags map[string]uint32 // Map of watched directories to fflags used in kqueue.
paths map[int]pathInfo // Map file descriptors to path names for processing kqueue events.
fileExists map[string]bool // Keep track of if we know this file exists (to stop duplicate create events).
isClosed bool // Set to true when Close() is first called
}
type pathInfo struct {
name string
isDir bool
}
// NewWatcher establishes a new watcher with the underlying OS and begins waiting for events.
func NewWatcher() (*Watcher, error) {
kq, err := kqueue()
if err != nil {
return nil, err
}
w := &Watcher{
kq: kq,
watches: make(map[string]int),
dirFlags: make(map[string]uint32),
paths: make(map[int]pathInfo),
fileExists: make(map[string]bool),
externalWatches: make(map[string]bool),
Events: make(chan Event),
Errors: make(chan error),
done: make(chan bool),
}
go w.readEvents()
return w, nil
}
// Close removes all watches and closes the events channel.
func (w *Watcher) Close() error {
w.mu.Lock()
if w.isClosed {
w.mu.Unlock()
return nil
}
w.isClosed = true
w.mu.Unlock()
// copy paths to remove while locked
w.mu.Lock()
var pathsToRemove = make([]string, 0, len(w.watches))
for name := range w.watches {
pathsToRemove = append(pathsToRemove, name)
}
w.mu.Unlock()
// unlock before calling Remove, which also locks
var err error
for _, name := range pathsToRemove {
if e := w.Remove(name); e != nil && err == nil {
err = e
}
}
// Send "quit" message to the reader goroutine:
w.done <- true
return nil
}
// Add starts watching the named file or directory (non-recursively).
func (w *Watcher) Add(name string) error {
w.mu.Lock()
w.externalWatches[name] = true
w.mu.Unlock()
_, err := w.addWatch(name, noteAllEvents)
return err
}
// Remove stops watching the named file or directory (non-recursively).
func (w *Watcher) Remove(name string) error {
name = filepath.Clean(name)
w.mu.Lock()
watchfd, ok := w.watches[name]
w.mu.Unlock()
if !ok {
return fmt.Errorf("can't remove non-existent kevent watch for: %s", name)
}
const registerRemove = unix.EV_DELETE
if err := register(w.kq, []int{watchfd}, registerRemove, 0); err != nil {
return err
}
unix.Close(watchfd)
w.mu.Lock()
isDir := w.paths[watchfd].isDir
delete(w.watches, name)
delete(w.paths, watchfd)
delete(w.dirFlags, name)
w.mu.Unlock()
// Find all watched paths that are in this directory that are not external.
if isDir {
var pathsToRemove []string
w.mu.Lock()
for _, path := range w.paths {
wdir, _ := filepath.Split(path.name)
if filepath.Clean(wdir) == name {
if !w.externalWatches[path.name] {
pathsToRemove = append(pathsToRemove, path.name)
}
}
}
w.mu.Unlock()
for _, name := range pathsToRemove {
// Since these are internal, not much sense in propagating error
// to the user, as that will just confuse them with an error about
// a path they did not explicitly watch themselves.
w.Remove(name)
}
}
return nil
}
// Watch all events (except NOTE_EXTEND, NOTE_LINK, NOTE_REVOKE)
const noteAllEvents = unix.NOTE_DELETE | unix.NOTE_WRITE | unix.NOTE_ATTRIB | unix.NOTE_RENAME
// keventWaitTime to block on each read from kevent
var keventWaitTime = durationToTimespec(100 * time.Millisecond)
// addWatch adds name to the watched file set.
// The flags are interpreted as described in kevent(2).
// Returns the real path of the file that was added, if any; in the case of symlinks this may differ from the path passed in.
func (w *Watcher) addWatch(name string, flags uint32) (string, error) {
var isDir bool
// Make ./name and name equivalent
name = filepath.Clean(name)
w.mu.Lock()
if w.isClosed {
w.mu.Unlock()
return "", errors.New("kevent instance already closed")
}
watchfd, alreadyWatching := w.watches[name]
// We already have a watch, but we can still override flags.
if alreadyWatching {
isDir = w.paths[watchfd].isDir
}
w.mu.Unlock()
if !alreadyWatching {
fi, err := os.Lstat(name)
if err != nil {
return "", err
}
// Don't watch sockets.
if fi.Mode()&os.ModeSocket == os.ModeSocket {
return "", nil
}
// Don't watch named pipes.
if fi.Mode()&os.ModeNamedPipe == os.ModeNamedPipe {
return "", nil
}
// Follow Symlinks
// Unfortunately, Linux can add bogus symlinks to the watch list without
// issue, and Windows can't do symlinks period (AFAIK). To maintain
// consistency, we will act like everything is fine. There will simply
// be no file events for broken symlinks.
// Hence the returns of nil on errors.
if fi.Mode()&os.ModeSymlink == os.ModeSymlink {
name, err = filepath.EvalSymlinks(name)
if err != nil {
return "", nil
}
w.mu.Lock()
_, alreadyWatching = w.watches[name]
w.mu.Unlock()
if alreadyWatching {
return name, nil
}
fi, err = os.Lstat(name)
if err != nil {
return "", nil
}
}
watchfd, err = unix.Open(name, openMode, 0700)
if watchfd == -1 {
return "", err
}
isDir = fi.IsDir()
}
const registerAdd = unix.EV_ADD | unix.EV_CLEAR | unix.EV_ENABLE
if err := register(w.kq, []int{watchfd}, registerAdd, flags); err != nil {
unix.Close(watchfd)
return "", err
}
if !alreadyWatching {
w.mu.Lock()
w.watches[name] = watchfd
w.paths[watchfd] = pathInfo{name: name, isDir: isDir}
w.mu.Unlock()
}
if isDir {
// Watch the directory if it has not been watched before,
// or if it was watched before, but perhaps only a NOTE_DELETE (watchDirectoryFiles)
w.mu.Lock()
watchDir := (flags&unix.NOTE_WRITE) == unix.NOTE_WRITE &&
(!alreadyWatching || (w.dirFlags[name]&unix.NOTE_WRITE) != unix.NOTE_WRITE)
// Store flags so this watch can be updated later
w.dirFlags[name] = flags
w.mu.Unlock()
if watchDir {
if err := w.watchDirectoryFiles(name); err != nil {
return "", err
}
}
}
return name, nil
}
// readEvents reads from kqueue and converts the received kevents into
// Event values that it sends down the Events channel.
func (w *Watcher) readEvents() {
eventBuffer := make([]unix.Kevent_t, 10)
for {
// See if there is a message on the "done" channel
select {
case <-w.done:
err := unix.Close(w.kq)
if err != nil {
w.Errors <- err
}
close(w.Events)
close(w.Errors)
return
default:
}
// Get new events
kevents, err := read(w.kq, eventBuffer, &keventWaitTime)
// EINTR is okay, the syscall was interrupted before timeout expired.
if err != nil && err != unix.EINTR {
w.Errors <- err
continue
}
// Flush the events we received to the Events channel
for len(kevents) > 0 {
kevent := &kevents[0]
watchfd := int(kevent.Ident)
mask := uint32(kevent.Fflags)
w.mu.Lock()
path := w.paths[watchfd]
w.mu.Unlock()
event := newEvent(path.name, mask)
if path.isDir && !(event.Op&Remove == Remove) {
// Double check to make sure the directory exists. This can happen when
// we do a rm -fr on a recursively watched folder and we receive a
// modification event first, but the folder has been deleted and we later
// receive the delete event.
if _, err := os.Lstat(event.Name); os.IsNotExist(err) {
// mark it as a delete event
event.Op |= Remove
}
}
if event.Op&Rename == Rename || event.Op&Remove == Remove {
w.Remove(event.Name)
w.mu.Lock()
delete(w.fileExists, event.Name)
w.mu.Unlock()
}
if path.isDir && event.Op&Write == Write && !(event.Op&Remove == Remove) {
w.sendDirectoryChangeEvents(event.Name)
} else {
// Send the event on the Events channel
w.Events <- event
}
if event.Op&Remove == Remove {
// Look for a file that may have overwritten this.
// For example, mv f1 f2 will delete f2, then create f2.
if path.isDir {
fileDir := filepath.Clean(event.Name)
w.mu.Lock()
_, found := w.watches[fileDir]
w.mu.Unlock()
if found {
// make sure the directory exists before we watch for changes. When we
// do a recursive watch and perform rm -fr, the parent directory might
// have gone missing; ignore the missing directory and let the
// upcoming delete event remove the watch from the parent directory.
if _, err := os.Lstat(fileDir); err == nil {
w.sendDirectoryChangeEvents(fileDir)
}
}
} else {
filePath := filepath.Clean(event.Name)
if fileInfo, err := os.Lstat(filePath); err == nil {
w.sendFileCreatedEventIfNew(filePath, fileInfo)
}
}
}
// Move to next event
kevents = kevents[1:]
}
}
}
// newEvent returns a platform-independent Event based on kqueue Fflags.
func newEvent(name string, mask uint32) Event {
e := Event{Name: name}
if mask&unix.NOTE_DELETE == unix.NOTE_DELETE {
e.Op |= Remove
}
if mask&unix.NOTE_WRITE == unix.NOTE_WRITE {
e.Op |= Write
}
if mask&unix.NOTE_RENAME == unix.NOTE_RENAME {
e.Op |= Rename
}
if mask&unix.NOTE_ATTRIB == unix.NOTE_ATTRIB {
e.Op |= Chmod
}
return e
}
func newCreateEvent(name string) Event {
return Event{Name: name, Op: Create}
}
// watchDirectoryFiles to mimic inotify when adding a watch on a directory
func (w *Watcher) watchDirectoryFiles(dirPath string) error {
// Get all files
files, err := ioutil.ReadDir(dirPath)
if err != nil {
return err
}
for _, fileInfo := range files {
filePath := filepath.Join(dirPath, fileInfo.Name())
filePath, err = w.internalWatch(filePath, fileInfo)
if err != nil {
return err
}
w.mu.Lock()
w.fileExists[filePath] = true
w.mu.Unlock()
}
return nil
}
// sendDirectoryChangeEvents searches the directory for newly created files
// and sends them over the event channel. This functionality is to have
// the BSD version of fsnotify match Linux inotify which provides a
// create event for files created in a watched directory.
func (w *Watcher) sendDirectoryChangeEvents(dirPath string) {
// Get all files
files, err := ioutil.ReadDir(dirPath)
if err != nil {
w.Errors <- err
}
// Search for new files
for _, fileInfo := range files {
filePath := filepath.Join(dirPath, fileInfo.Name())
err := w.sendFileCreatedEventIfNew(filePath, fileInfo)
if err != nil {
return
}
}
}
// sendFileCreatedEventIfNew sends a create event if the file isn't already being tracked.
func (w *Watcher) sendFileCreatedEventIfNew(filePath string, fileInfo os.FileInfo) (err error) {
w.mu.Lock()
_, doesExist := w.fileExists[filePath]
w.mu.Unlock()
if !doesExist {
// Send create event
w.Events <- newCreateEvent(filePath)
}
// like watchDirectoryFiles (but without doing another ReadDir)
filePath, err = w.internalWatch(filePath, fileInfo)
if err != nil {
return err
}
w.mu.Lock()
w.fileExists[filePath] = true
w.mu.Unlock()
return nil
}
func (w *Watcher) internalWatch(name string, fileInfo os.FileInfo) (string, error) {
if fileInfo.IsDir() {
// mimic Linux providing delete events for subdirectories
// but preserve the flags used if currently watching subdirectory
w.mu.Lock()
flags := w.dirFlags[name]
w.mu.Unlock()
flags |= unix.NOTE_DELETE | unix.NOTE_RENAME
return w.addWatch(name, flags)
}
// watch file to mimic Linux inotify
return w.addWatch(name, noteAllEvents)
}
// kqueue creates a new kernel event queue and returns a descriptor.
func kqueue() (kq int, err error) {
kq, err = unix.Kqueue()
if kq == -1 {
return kq, err
}
return kq, nil
}
// register events with the queue
func register(kq int, fds []int, flags int, fflags uint32) error {
changes := make([]unix.Kevent_t, len(fds))
for i, fd := range fds {
// SetKevent converts int to the platform-specific types:
unix.SetKevent(&changes[i], fd, unix.EVFILT_VNODE, flags)
changes[i].Fflags = fflags
}
// register the events
success, err := unix.Kevent(kq, changes, nil, nil)
if success == -1 {
return err
}
return nil
}
// read retrieves pending events, or waits until an event occurs.
// A timeout of nil blocks indefinitely, while 0 polls the queue.
func read(kq int, events []unix.Kevent_t, timeout *unix.Timespec) ([]unix.Kevent_t, error) {
n, err := unix.Kevent(kq, nil, events, timeout)
if err != nil {
return nil, err
}
return events[0:n], nil
}
// durationToTimespec prepares a timeout value
func durationToTimespec(d time.Duration) unix.Timespec {
return unix.NsecToTimespec(d.Nanoseconds())
}

View File

@ -1,561 +0,0 @@
// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build windows
package fsnotify
import (
"errors"
"fmt"
"os"
"path/filepath"
"runtime"
"sync"
"syscall"
"unsafe"
)
// Watcher watches a set of files, delivering events to a channel.
type Watcher struct {
Events chan Event
Errors chan error
isClosed bool // Set to true when Close() is first called
mu sync.Mutex // Map access
port syscall.Handle // Handle to completion port
watches watchMap // Map of watches (key: i-number)
input chan *input // Inputs to the reader are sent on this channel
quit chan chan<- error
}
// NewWatcher establishes a new watcher with the underlying OS and begins waiting for events.
func NewWatcher() (*Watcher, error) {
port, e := syscall.CreateIoCompletionPort(syscall.InvalidHandle, 0, 0, 0)
if e != nil {
return nil, os.NewSyscallError("CreateIoCompletionPort", e)
}
w := &Watcher{
port: port,
watches: make(watchMap),
input: make(chan *input, 1),
Events: make(chan Event, 50),
Errors: make(chan error),
quit: make(chan chan<- error, 1),
}
go w.readEvents()
return w, nil
}
// Close removes all watches and closes the events channel.
func (w *Watcher) Close() error {
if w.isClosed {
return nil
}
w.isClosed = true
// Send "quit" message to the reader goroutine
ch := make(chan error)
w.quit <- ch
if err := w.wakeupReader(); err != nil {
return err
}
return <-ch
}
// Add starts watching the named file or directory (non-recursively).
func (w *Watcher) Add(name string) error {
if w.isClosed {
return errors.New("watcher already closed")
}
in := &input{
op: opAddWatch,
path: filepath.Clean(name),
flags: sysFSALLEVENTS,
reply: make(chan error),
}
w.input <- in
if err := w.wakeupReader(); err != nil {
return err
}
return <-in.reply
}
// Remove stops watching the named file or directory (non-recursively).
func (w *Watcher) Remove(name string) error {
in := &input{
op: opRemoveWatch,
path: filepath.Clean(name),
reply: make(chan error),
}
w.input <- in
if err := w.wakeupReader(); err != nil {
return err
}
return <-in.reply
}
const (
// Options for AddWatch
sysFSONESHOT = 0x80000000
sysFSONLYDIR = 0x1000000
// Events
sysFSACCESS = 0x1
sysFSALLEVENTS = 0xfff
sysFSATTRIB = 0x4
sysFSCLOSE = 0x18
sysFSCREATE = 0x100
sysFSDELETE = 0x200
sysFSDELETESELF = 0x400
sysFSMODIFY = 0x2
sysFSMOVE = 0xc0
sysFSMOVEDFROM = 0x40
sysFSMOVEDTO = 0x80
sysFSMOVESELF = 0x800
// Special events
sysFSIGNORED = 0x8000
sysFSQOVERFLOW = 0x4000
)
func newEvent(name string, mask uint32) Event {
e := Event{Name: name}
if mask&sysFSCREATE == sysFSCREATE || mask&sysFSMOVEDTO == sysFSMOVEDTO {
e.Op |= Create
}
if mask&sysFSDELETE == sysFSDELETE || mask&sysFSDELETESELF == sysFSDELETESELF {
e.Op |= Remove
}
if mask&sysFSMODIFY == sysFSMODIFY {
e.Op |= Write
}
if mask&sysFSMOVE == sysFSMOVE || mask&sysFSMOVESELF == sysFSMOVESELF || mask&sysFSMOVEDFROM == sysFSMOVEDFROM {
e.Op |= Rename
}
if mask&sysFSATTRIB == sysFSATTRIB {
e.Op |= Chmod
}
return e
}
const (
opAddWatch = iota
opRemoveWatch
)
const (
provisional uint64 = 1 << (32 + iota)
)
type input struct {
op int
path string
flags uint32
reply chan error
}
type inode struct {
handle syscall.Handle
volume uint32
index uint64
}
type watch struct {
ov syscall.Overlapped
ino *inode // i-number
path string // Directory path
mask uint64 // Directory itself is being watched with these notify flags
names map[string]uint64 // Map of names being watched and their notify flags
rename string // Remembers the old name while renaming a file
buf [4096]byte
}
type indexMap map[uint64]*watch
type watchMap map[uint32]indexMap
func (w *Watcher) wakeupReader() error {
e := syscall.PostQueuedCompletionStatus(w.port, 0, 0, nil)
if e != nil {
return os.NewSyscallError("PostQueuedCompletionStatus", e)
}
return nil
}
func getDir(pathname string) (dir string, err error) {
attr, e := syscall.GetFileAttributes(syscall.StringToUTF16Ptr(pathname))
if e != nil {
return "", os.NewSyscallError("GetFileAttributes", e)
}
if attr&syscall.FILE_ATTRIBUTE_DIRECTORY != 0 {
dir = pathname
} else {
dir, _ = filepath.Split(pathname)
dir = filepath.Clean(dir)
}
return
}
func getIno(path string) (ino *inode, err error) {
h, e := syscall.CreateFile(syscall.StringToUTF16Ptr(path),
syscall.FILE_LIST_DIRECTORY,
syscall.FILE_SHARE_READ|syscall.FILE_SHARE_WRITE|syscall.FILE_SHARE_DELETE,
nil, syscall.OPEN_EXISTING,
syscall.FILE_FLAG_BACKUP_SEMANTICS|syscall.FILE_FLAG_OVERLAPPED, 0)
if e != nil {
return nil, os.NewSyscallError("CreateFile", e)
}
var fi syscall.ByHandleFileInformation
if e = syscall.GetFileInformationByHandle(h, &fi); e != nil {
syscall.CloseHandle(h)
return nil, os.NewSyscallError("GetFileInformationByHandle", e)
}
ino = &inode{
handle: h,
volume: fi.VolumeSerialNumber,
index: uint64(fi.FileIndexHigh)<<32 | uint64(fi.FileIndexLow),
}
return ino, nil
}
// Must run within the I/O thread.
func (m watchMap) get(ino *inode) *watch {
if i := m[ino.volume]; i != nil {
return i[ino.index]
}
return nil
}
// Must run within the I/O thread.
func (m watchMap) set(ino *inode, watch *watch) {
i := m[ino.volume]
if i == nil {
i = make(indexMap)
m[ino.volume] = i
}
i[ino.index] = watch
}
// Must run within the I/O thread.
func (w *Watcher) addWatch(pathname string, flags uint64) error {
dir, err := getDir(pathname)
if err != nil {
return err
}
if flags&sysFSONLYDIR != 0 && pathname != dir {
return nil
}
ino, err := getIno(dir)
if err != nil {
return err
}
w.mu.Lock()
watchEntry := w.watches.get(ino)
w.mu.Unlock()
if watchEntry == nil {
if _, e := syscall.CreateIoCompletionPort(ino.handle, w.port, 0, 0); e != nil {
syscall.CloseHandle(ino.handle)
return os.NewSyscallError("CreateIoCompletionPort", e)
}
watchEntry = &watch{
ino: ino,
path: dir,
names: make(map[string]uint64),
}
w.mu.Lock()
w.watches.set(ino, watchEntry)
w.mu.Unlock()
flags |= provisional
} else {
syscall.CloseHandle(ino.handle)
}
if pathname == dir {
watchEntry.mask |= flags
} else {
watchEntry.names[filepath.Base(pathname)] |= flags
}
if err = w.startRead(watchEntry); err != nil {
return err
}
if pathname == dir {
watchEntry.mask &= ^provisional
} else {
watchEntry.names[filepath.Base(pathname)] &= ^provisional
}
return nil
}
// Must run within the I/O thread.
func (w *Watcher) remWatch(pathname string) error {
dir, err := getDir(pathname)
if err != nil {
return err
}
ino, err := getIno(dir)
if err != nil {
return err
}
w.mu.Lock()
watch := w.watches.get(ino)
w.mu.Unlock()
if watch == nil {
return fmt.Errorf("can't remove non-existent watch for: %s", pathname)
}
if pathname == dir {
w.sendEvent(watch.path, watch.mask&sysFSIGNORED)
watch.mask = 0
} else {
name := filepath.Base(pathname)
w.sendEvent(filepath.Join(watch.path, name), watch.names[name]&sysFSIGNORED)
delete(watch.names, name)
}
return w.startRead(watch)
}
// Must run within the I/O thread.
func (w *Watcher) deleteWatch(watch *watch) {
for name, mask := range watch.names {
if mask&provisional == 0 {
w.sendEvent(filepath.Join(watch.path, name), mask&sysFSIGNORED)
}
delete(watch.names, name)
}
if watch.mask != 0 {
if watch.mask&provisional == 0 {
w.sendEvent(watch.path, watch.mask&sysFSIGNORED)
}
watch.mask = 0
}
}
// Must run within the I/O thread.
func (w *Watcher) startRead(watch *watch) error {
if e := syscall.CancelIo(watch.ino.handle); e != nil {
w.Errors <- os.NewSyscallError("CancelIo", e)
w.deleteWatch(watch)
}
mask := toWindowsFlags(watch.mask)
for _, m := range watch.names {
mask |= toWindowsFlags(m)
}
if mask == 0 {
if e := syscall.CloseHandle(watch.ino.handle); e != nil {
w.Errors <- os.NewSyscallError("CloseHandle", e)
}
w.mu.Lock()
delete(w.watches[watch.ino.volume], watch.ino.index)
w.mu.Unlock()
return nil
}
e := syscall.ReadDirectoryChanges(watch.ino.handle, &watch.buf[0],
uint32(unsafe.Sizeof(watch.buf)), false, mask, nil, &watch.ov, 0)
if e != nil {
err := os.NewSyscallError("ReadDirectoryChanges", e)
if e == syscall.ERROR_ACCESS_DENIED && watch.mask&provisional == 0 {
// Watched directory was probably removed
if w.sendEvent(watch.path, watch.mask&sysFSDELETESELF) {
if watch.mask&sysFSONESHOT != 0 {
watch.mask = 0
}
}
err = nil
}
w.deleteWatch(watch)
w.startRead(watch)
return err
}
return nil
}
// readEvents reads from the I/O completion port, converts the
// received events into Event objects and sends them via the Events channel.
// Entry point to the I/O thread.
func (w *Watcher) readEvents() {
var (
n, key uint32
ov *syscall.Overlapped
)
runtime.LockOSThread()
for {
e := syscall.GetQueuedCompletionStatus(w.port, &n, &key, &ov, syscall.INFINITE)
watch := (*watch)(unsafe.Pointer(ov))
if watch == nil {
select {
case ch := <-w.quit:
w.mu.Lock()
var indexes []indexMap
for _, index := range w.watches {
indexes = append(indexes, index)
}
w.mu.Unlock()
for _, index := range indexes {
for _, watch := range index {
w.deleteWatch(watch)
w.startRead(watch)
}
}
var err error
if e := syscall.CloseHandle(w.port); e != nil {
err = os.NewSyscallError("CloseHandle", e)
}
close(w.Events)
close(w.Errors)
ch <- err
return
case in := <-w.input:
switch in.op {
case opAddWatch:
in.reply <- w.addWatch(in.path, uint64(in.flags))
case opRemoveWatch:
in.reply <- w.remWatch(in.path)
}
default:
}
continue
}
switch e {
case syscall.ERROR_MORE_DATA:
if watch == nil {
w.Errors <- errors.New("ERROR_MORE_DATA has unexpectedly null lpOverlapped buffer")
} else {
// The i/o succeeded but the buffer is full.
// In theory we should be building up a full packet.
// In practice we can get away with just carrying on.
n = uint32(unsafe.Sizeof(watch.buf))
}
case syscall.ERROR_ACCESS_DENIED:
// Watched directory was probably removed
w.sendEvent(watch.path, watch.mask&sysFSDELETESELF)
w.deleteWatch(watch)
w.startRead(watch)
continue
case syscall.ERROR_OPERATION_ABORTED:
// CancelIo was called on this handle
continue
default:
w.Errors <- os.NewSyscallError("GetQueuedCompletionPort", e)
continue
case nil:
}
var offset uint32
for {
if n == 0 {
w.Events <- newEvent("", sysFSQOVERFLOW)
w.Errors <- errors.New("short read in readEvents()")
break
}
// Point "raw" to the event in the buffer
raw := (*syscall.FileNotifyInformation)(unsafe.Pointer(&watch.buf[offset]))
buf := (*[syscall.MAX_PATH]uint16)(unsafe.Pointer(&raw.FileName))
name := syscall.UTF16ToString(buf[:raw.FileNameLength/2])
fullname := filepath.Join(watch.path, name)
var mask uint64
switch raw.Action {
case syscall.FILE_ACTION_REMOVED:
mask = sysFSDELETESELF
case syscall.FILE_ACTION_MODIFIED:
mask = sysFSMODIFY
case syscall.FILE_ACTION_RENAMED_OLD_NAME:
watch.rename = name
case syscall.FILE_ACTION_RENAMED_NEW_NAME:
if watch.names[watch.rename] != 0 {
watch.names[name] |= watch.names[watch.rename]
delete(watch.names, watch.rename)
mask = sysFSMOVESELF
}
}
sendNameEvent := func() {
if w.sendEvent(fullname, watch.names[name]&mask) {
if watch.names[name]&sysFSONESHOT != 0 {
delete(watch.names, name)
}
}
}
if raw.Action != syscall.FILE_ACTION_RENAMED_NEW_NAME {
sendNameEvent()
}
if raw.Action == syscall.FILE_ACTION_REMOVED {
w.sendEvent(fullname, watch.names[name]&sysFSIGNORED)
delete(watch.names, name)
}
if w.sendEvent(fullname, watch.mask&toFSnotifyFlags(raw.Action)) {
if watch.mask&sysFSONESHOT != 0 {
watch.mask = 0
}
}
if raw.Action == syscall.FILE_ACTION_RENAMED_NEW_NAME {
fullname = filepath.Join(watch.path, watch.rename)
sendNameEvent()
}
// Move to the next event in the buffer
if raw.NextEntryOffset == 0 {
break
}
offset += raw.NextEntryOffset
// Error!
if offset >= n {
w.Errors <- errors.New("Windows system assumed buffer larger than it is, events have likely been missed.")
break
}
}
if err := w.startRead(watch); err != nil {
w.Errors <- err
}
}
}
func (w *Watcher) sendEvent(name string, mask uint64) bool {
if mask == 0 {
return false
}
event := newEvent(name, uint32(mask))
select {
case ch := <-w.quit:
w.quit <- ch
case w.Events <- event:
}
return true
}
func toWindowsFlags(mask uint64) uint32 {
var m uint32
if mask&sysFSACCESS != 0 {
m |= syscall.FILE_NOTIFY_CHANGE_LAST_ACCESS
}
if mask&sysFSMODIFY != 0 {
m |= syscall.FILE_NOTIFY_CHANGE_LAST_WRITE
}
if mask&sysFSATTRIB != 0 {
m |= syscall.FILE_NOTIFY_CHANGE_ATTRIBUTES
}
if mask&(sysFSMOVE|sysFSCREATE|sysFSDELETE) != 0 {
m |= syscall.FILE_NOTIFY_CHANGE_FILE_NAME | syscall.FILE_NOTIFY_CHANGE_DIR_NAME
}
return m
}
func toFSnotifyFlags(action uint32) uint64 {
switch action {
case syscall.FILE_ACTION_ADDED:
return sysFSCREATE
case syscall.FILE_ACTION_REMOVED:
return sysFSDELETE
case syscall.FILE_ACTION_MODIFIED:
return sysFSMODIFY
case syscall.FILE_ACTION_RENAMED_OLD_NAME:
return sysFSMOVEDFROM
case syscall.FILE_ACTION_RENAMED_NEW_NAME:
return sysFSMOVEDTO
}
return 0
}

View File

@ -1,5 +1,4 @@
Copyright (c) 2012 The Go Authors. All rights reserved. Copyright (c) 2009 The Go Authors. All rights reserved.
Copyright (c) 2012 fsnotify Authors. All rights reserved.
Redistribution and use in source and binary forms, with or without Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are modification, are permitted provided that the following conditions are

22
vendor/golang.org/x/crypto/PATENTS generated vendored Normal file
View File

@ -0,0 +1,22 @@
Additional IP Rights Grant (Patents)
"This implementation" means the copyrightable works distributed by
Google as part of the Go project.
Google hereby grants to You a perpetual, worldwide, non-exclusive,
no-charge, royalty-free, irrevocable (except as stated in this section)
patent license to make, have made, use, offer to sell, sell, import,
transfer and otherwise run, modify and propagate the contents of this
implementation of Go, where such license applies only to those patent
claims, both currently owned or controlled by Google and acquired in
the future, licensable by Google that are necessarily infringed by this
implementation of Go. This grant does not include claims that would be
infringed only as a consequence of further modification of this
implementation. If you or your agent or exclusive licensee institute or
order or agree to the institution of patent litigation against any
entity (including a cross-claim or counterclaim in a lawsuit) alleging
that this implementation of Go or any code incorporated within this
implementation of Go constitutes direct or contributory patent
infringement, or inducement of patent infringement, then any patent
rights granted to you under this License for this implementation of Go
shall terminate as of the date such litigation is filed.

21
vendor/golang.org/x/crypto/README.md generated vendored Normal file
View File

@ -0,0 +1,21 @@
# Go Cryptography
This repository holds supplementary Go cryptography libraries.
## Download/Install
The easiest way to install is to run `go get -u golang.org/x/crypto/...`. You
can also manually git clone the repository to `$GOPATH/src/golang.org/x/crypto`.
## Report Issues / Send Patches
This repository uses Gerrit for code changes. To learn how to submit changes to
this repository, see https://golang.org/doc/contribute.html.
The main issue tracker for the crypto repository is located at
https://github.com/golang/go/issues. Prefix your issue with "x/crypto:" in the
subject line, so it is easy to find.
Note that contributions to the cryptography package receive additional scrutiny
due to their sensitive nature. Patches may take longer than normal to receive
feedback.

8
vendor/golang.org/x/crypto/curve25519/const_amd64.h generated vendored Normal file
View File

@ -0,0 +1,8 @@
// Copyright 2012 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// This code was translated into a form compatible with 6a from the public
// domain sources in SUPERCOP: https://bench.cr.yp.to/supercop.html
#define REDMASK51 0x0007FFFFFFFFFFFF

20
vendor/golang.org/x/crypto/curve25519/const_amd64.s generated vendored Normal file
View File

@ -0,0 +1,20 @@
// Copyright 2012 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// This code was translated into a form compatible with 6a from the public
// domain sources in SUPERCOP: https://bench.cr.yp.to/supercop.html
// +build amd64,!gccgo,!appengine
// These constants cannot be encoded in non-MOVQ immediates.
// We access them directly from memory instead.
DATA ·_121666_213(SB)/8, $996687872
GLOBL ·_121666_213(SB), 8, $8
DATA ·_2P0(SB)/8, $0xFFFFFFFFFFFDA
GLOBL ·_2P0(SB), 8, $8
DATA ·_2P1234(SB)/8, $0xFFFFFFFFFFFFE
GLOBL ·_2P1234(SB), 8, $8

65
vendor/golang.org/x/crypto/curve25519/cswap_amd64.s generated vendored Normal file
View File

@ -0,0 +1,65 @@
// Copyright 2012 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build amd64,!gccgo,!appengine
// func cswap(inout *[4][5]uint64, v uint64)
TEXT ·cswap(SB),7,$0
MOVQ inout+0(FP),DI
MOVQ v+8(FP),SI
SUBQ $1, SI
NOTQ SI
MOVQ SI, X15
PSHUFD $0x44, X15, X15
MOVOU 0(DI), X0
MOVOU 16(DI), X2
MOVOU 32(DI), X4
MOVOU 48(DI), X6
MOVOU 64(DI), X8
MOVOU 80(DI), X1
MOVOU 96(DI), X3
MOVOU 112(DI), X5
MOVOU 128(DI), X7
MOVOU 144(DI), X9
MOVO X1, X10
MOVO X3, X11
MOVO X5, X12
MOVO X7, X13
MOVO X9, X14
PXOR X0, X10
PXOR X2, X11
PXOR X4, X12
PXOR X6, X13
PXOR X8, X14
PAND X15, X10
PAND X15, X11
PAND X15, X12
PAND X15, X13
PAND X15, X14
PXOR X10, X0
PXOR X10, X1
PXOR X11, X2
PXOR X11, X3
PXOR X12, X4
PXOR X12, X5
PXOR X13, X6
PXOR X13, X7
PXOR X14, X8
PXOR X14, X9
MOVOU X0, 0(DI)
MOVOU X2, 16(DI)
MOVOU X4, 32(DI)
MOVOU X6, 48(DI)
MOVOU X8, 64(DI)
MOVOU X1, 80(DI)
MOVOU X3, 96(DI)
MOVOU X5, 112(DI)
MOVOU X7, 128(DI)
MOVOU X9, 144(DI)
RET

834
vendor/golang.org/x/crypto/curve25519/curve25519.go generated vendored Normal file
View File

@ -0,0 +1,834 @@
// Copyright 2013 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// We have an implementation in amd64 assembly so this code is only run on
// non-amd64 platforms. The amd64 assembly does not support gccgo.
// +build !amd64 gccgo appengine
package curve25519
import (
"encoding/binary"
)
// This code is a port of the public domain, "ref10" implementation of
// curve25519 from SUPERCOP 20130419 by D. J. Bernstein.
// fieldElement represents an element of the field GF(2^255 - 19). An element
// t, entries t[0]...t[9], represents the integer t[0]+2^26 t[1]+2^51 t[2]+2^77
// t[3]+2^102 t[4]+...+2^230 t[9]. Bounds on each t[i] vary depending on
// context.
type fieldElement [10]int32
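The comment above describes a radix-2^25.5 packing: limb i carries weight 2^ceil(25.5*i), i.e. exponents 0, 26, 51, 77, 102, 128, 153, 179, 204, 230. A stand-alone sketch (not part of the vendored package) that evaluates such a limb vector modulo 2^255-19 with math/big, handy for checking the representation by hand:

```go
// Evaluate a radix-2^25.5 limb vector as an integer modulo 2^255-19,
// matching the fieldElement representation described above. Illustrative only.
package main

import (
	"fmt"
	"math/big"
)

func evalLimbs(t [10]int32) *big.Int {
	p := new(big.Int).Lsh(big.NewInt(1), 255)
	p.Sub(p, big.NewInt(19)) // p = 2^255 - 19
	sum := new(big.Int)
	for i, ti := range t {
		exp := uint((51*i + 1) / 2) // ceil(25.5*i): 0, 26, 51, 77, 102, 128, 153, 179, 204, 230
		term := new(big.Int).Lsh(big.NewInt(int64(ti)), exp)
		sum.Add(sum, term)
	}
	return sum.Mod(sum, p)
}

func main() {
	// The element 1 is the limb vector (1, 0, ..., 0), as produced by feOne.
	fmt.Println(evalLimbs([10]int32{1})) // prints 1
}
```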
func feZero(fe *fieldElement) {
for i := range fe {
fe[i] = 0
}
}
func feOne(fe *fieldElement) {
feZero(fe)
fe[0] = 1
}
func feAdd(dst, a, b *fieldElement) {
for i := range dst {
dst[i] = a[i] + b[i]
}
}
func feSub(dst, a, b *fieldElement) {
for i := range dst {
dst[i] = a[i] - b[i]
}
}
func feCopy(dst, src *fieldElement) {
for i := range dst {
dst[i] = src[i]
}
}
// feCSwap replaces (f,g) with (g,f) if b == 1; replaces (f,g) with (f,g) if b == 0.
//
// Preconditions: b in {0,1}.
func feCSwap(f, g *fieldElement, b int32) {
b = -b
for i := range f {
t := b & (f[i] ^ g[i])
f[i] ^= t
g[i] ^= t
}
}
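feCSwap is the constant-time conditional swap used by the Montgomery ladder below: negating a 0/1 flag yields an all-zeros or all-ones mask, and XOR-ing the masked difference swaps the operands without a data-dependent branch. A tiny stand-alone illustration of the same trick on plain int32 values:

```go
// Branch-free conditional swap, the same masking trick as feCSwap above.
package main

import "fmt"

func cswap32(f, g, b int32) (int32, int32) {
	mask := -b          // 0x00000000 if b == 0, 0xFFFFFFFF if b == 1
	t := mask & (f ^ g) // either 0 or f^g
	return f ^ t, g ^ t // either (f, g) or (g, f)
}

func main() {
	fmt.Println(cswap32(7, 9, 0)) // 7 9
	fmt.Println(cswap32(7, 9, 1)) // 9 7
}
```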
// load3 reads a 24-bit, little-endian value from in.
func load3(in []byte) int64 {
var r int64
r = int64(in[0])
r |= int64(in[1]) << 8
r |= int64(in[2]) << 16
return r
}
// load4 reads a 32-bit, little-endian value from in.
func load4(in []byte) int64 {
return int64(binary.LittleEndian.Uint32(in))
}
func feFromBytes(dst *fieldElement, src *[32]byte) {
h0 := load4(src[:])
h1 := load3(src[4:]) << 6
h2 := load3(src[7:]) << 5
h3 := load3(src[10:]) << 3
h4 := load3(src[13:]) << 2
h5 := load4(src[16:])
h6 := load3(src[20:]) << 7
h7 := load3(src[23:]) << 5
h8 := load3(src[26:]) << 4
h9 := load3(src[29:]) << 2
var carry [10]int64
carry[9] = (h9 + 1<<24) >> 25
h0 += carry[9] * 19
h9 -= carry[9] << 25
carry[1] = (h1 + 1<<24) >> 25
h2 += carry[1]
h1 -= carry[1] << 25
carry[3] = (h3 + 1<<24) >> 25
h4 += carry[3]
h3 -= carry[3] << 25
carry[5] = (h5 + 1<<24) >> 25
h6 += carry[5]
h5 -= carry[5] << 25
carry[7] = (h7 + 1<<24) >> 25
h8 += carry[7]
h7 -= carry[7] << 25
carry[0] = (h0 + 1<<25) >> 26
h1 += carry[0]
h0 -= carry[0] << 26
carry[2] = (h2 + 1<<25) >> 26
h3 += carry[2]
h2 -= carry[2] << 26
carry[4] = (h4 + 1<<25) >> 26
h5 += carry[4]
h4 -= carry[4] << 26
carry[6] = (h6 + 1<<25) >> 26
h7 += carry[6]
h6 -= carry[6] << 26
carry[8] = (h8 + 1<<25) >> 26
h9 += carry[8]
h8 -= carry[8] << 26
dst[0] = int32(h0)
dst[1] = int32(h1)
dst[2] = int32(h2)
dst[3] = int32(h3)
dst[4] = int32(h4)
dst[5] = int32(h5)
dst[6] = int32(h6)
dst[7] = int32(h7)
dst[8] = int32(h8)
dst[9] = int32(h9)
}
// feToBytes marshals h to s.
// Preconditions:
// |h| bounded by 1.1*2^25,1.1*2^24,1.1*2^25,1.1*2^24,etc.
//
// Write p=2^255-19; q=floor(h/p).
// Basic claim: q = floor(2^(-255)(h + 19 2^(-25)h9 + 2^(-1))).
//
// Proof:
// Have |h|<=p so |q|<=1 so |19^2 2^(-255) q|<1/4.
// Also have |h-2^230 h9|<2^230 so |19 2^(-255)(h-2^230 h9)|<1/4.
//
// Write y=2^(-1)-19^2 2^(-255)q-19 2^(-255)(h-2^230 h9).
// Then 0<y<1.
//
// Write r=h-pq.
// Have 0<=r<=p-1=2^255-20.
// Thus 0<=r+19(2^-255)r<r+19(2^-255)2^255<=2^255-1.
//
// Write x=r+19(2^-255)r+y.
// Then 0<x<2^255 so floor(2^(-255)x) = 0 so floor(q+2^(-255)x) = q.
//
// Have q+2^(-255)x = 2^(-255)(h + 19 2^(-25) h9 + 2^(-1))
// so floor(2^(-255)(h + 19 2^(-25) h9 + 2^(-1))) = q.
func feToBytes(s *[32]byte, h *fieldElement) {
var carry [10]int32
q := (19*h[9] + (1 << 24)) >> 25
q = (h[0] + q) >> 26
q = (h[1] + q) >> 25
q = (h[2] + q) >> 26
q = (h[3] + q) >> 25
q = (h[4] + q) >> 26
q = (h[5] + q) >> 25
q = (h[6] + q) >> 26
q = (h[7] + q) >> 25
q = (h[8] + q) >> 26
q = (h[9] + q) >> 25
// Goal: Output h-(2^255-19)q, which is between 0 and 2^255-20.
h[0] += 19 * q
// Goal: Output h-2^255 q, which is between 0 and 2^255-20.
carry[0] = h[0] >> 26
h[1] += carry[0]
h[0] -= carry[0] << 26
carry[1] = h[1] >> 25
h[2] += carry[1]
h[1] -= carry[1] << 25
carry[2] = h[2] >> 26
h[3] += carry[2]
h[2] -= carry[2] << 26
carry[3] = h[3] >> 25
h[4] += carry[3]
h[3] -= carry[3] << 25
carry[4] = h[4] >> 26
h[5] += carry[4]
h[4] -= carry[4] << 26
carry[5] = h[5] >> 25
h[6] += carry[5]
h[5] -= carry[5] << 25
carry[6] = h[6] >> 26
h[7] += carry[6]
h[6] -= carry[6] << 26
carry[7] = h[7] >> 25
h[8] += carry[7]
h[7] -= carry[7] << 25
carry[8] = h[8] >> 26
h[9] += carry[8]
h[8] -= carry[8] << 26
carry[9] = h[9] >> 25
h[9] -= carry[9] << 25
// h10 = carry9
// Goal: Output h[0]+...+2^255 h10-2^255 q, which is between 0 and 2^255-20.
// Have h[0]+...+2^230 h[9] between 0 and 2^255-1;
// evidently 2^255 h10-2^255 q = 0.
// Goal: Output h[0]+...+2^230 h[9].
s[0] = byte(h[0] >> 0)
s[1] = byte(h[0] >> 8)
s[2] = byte(h[0] >> 16)
s[3] = byte((h[0] >> 24) | (h[1] << 2))
s[4] = byte(h[1] >> 6)
s[5] = byte(h[1] >> 14)
s[6] = byte((h[1] >> 22) | (h[2] << 3))
s[7] = byte(h[2] >> 5)
s[8] = byte(h[2] >> 13)
s[9] = byte((h[2] >> 21) | (h[3] << 5))
s[10] = byte(h[3] >> 3)
s[11] = byte(h[3] >> 11)
s[12] = byte((h[3] >> 19) | (h[4] << 6))
s[13] = byte(h[4] >> 2)
s[14] = byte(h[4] >> 10)
s[15] = byte(h[4] >> 18)
s[16] = byte(h[5] >> 0)
s[17] = byte(h[5] >> 8)
s[18] = byte(h[5] >> 16)
s[19] = byte((h[5] >> 24) | (h[6] << 1))
s[20] = byte(h[6] >> 7)
s[21] = byte(h[6] >> 15)
s[22] = byte((h[6] >> 23) | (h[7] << 3))
s[23] = byte(h[7] >> 5)
s[24] = byte(h[7] >> 13)
s[25] = byte((h[7] >> 21) | (h[8] << 4))
s[26] = byte(h[8] >> 4)
s[27] = byte(h[8] >> 12)
s[28] = byte((h[8] >> 20) | (h[9] << 6))
s[29] = byte(h[9] >> 2)
s[30] = byte(h[9] >> 10)
s[31] = byte(h[9] >> 18)
}
// feMul calculates h = f * g
// Can overlap h with f or g.
//
// Preconditions:
// |f| bounded by 1.1*2^26,1.1*2^25,1.1*2^26,1.1*2^25,etc.
// |g| bounded by 1.1*2^26,1.1*2^25,1.1*2^26,1.1*2^25,etc.
//
// Postconditions:
// |h| bounded by 1.1*2^25,1.1*2^24,1.1*2^25,1.1*2^24,etc.
//
// Notes on implementation strategy:
//
// Using schoolbook multiplication.
// Karatsuba would save a little in some cost models.
//
// Most multiplications by 2 and 19 are 32-bit precomputations;
// cheaper than 64-bit postcomputations.
//
// There is one remaining multiplication by 19 in the carry chain;
// one *19 precomputation can be merged into this,
// but the resulting data flow is considerably less clean.
//
// There are 12 carries below.
// 10 of them are 2-way parallelizable and vectorizable.
// Can get away with 11 carries, but then data flow is much deeper.
//
// With tighter constraints on inputs can squeeze carries into int32.
func feMul(h, f, g *fieldElement) {
f0 := f[0]
f1 := f[1]
f2 := f[2]
f3 := f[3]
f4 := f[4]
f5 := f[5]
f6 := f[6]
f7 := f[7]
f8 := f[8]
f9 := f[9]
g0 := g[0]
g1 := g[1]
g2 := g[2]
g3 := g[3]
g4 := g[4]
g5 := g[5]
g6 := g[6]
g7 := g[7]
g8 := g[8]
g9 := g[9]
g1_19 := 19 * g1 // 1.4*2^29
g2_19 := 19 * g2 // 1.4*2^30; still ok
g3_19 := 19 * g3
g4_19 := 19 * g4
g5_19 := 19 * g5
g6_19 := 19 * g6
g7_19 := 19 * g7
g8_19 := 19 * g8
g9_19 := 19 * g9
f1_2 := 2 * f1
f3_2 := 2 * f3
f5_2 := 2 * f5
f7_2 := 2 * f7
f9_2 := 2 * f9
f0g0 := int64(f0) * int64(g0)
f0g1 := int64(f0) * int64(g1)
f0g2 := int64(f0) * int64(g2)
f0g3 := int64(f0) * int64(g3)
f0g4 := int64(f0) * int64(g4)
f0g5 := int64(f0) * int64(g5)
f0g6 := int64(f0) * int64(g6)
f0g7 := int64(f0) * int64(g7)
f0g8 := int64(f0) * int64(g8)
f0g9 := int64(f0) * int64(g9)
f1g0 := int64(f1) * int64(g0)
f1g1_2 := int64(f1_2) * int64(g1)
f1g2 := int64(f1) * int64(g2)
f1g3_2 := int64(f1_2) * int64(g3)
f1g4 := int64(f1) * int64(g4)
f1g5_2 := int64(f1_2) * int64(g5)
f1g6 := int64(f1) * int64(g6)
f1g7_2 := int64(f1_2) * int64(g7)
f1g8 := int64(f1) * int64(g8)
f1g9_38 := int64(f1_2) * int64(g9_19)
f2g0 := int64(f2) * int64(g0)
f2g1 := int64(f2) * int64(g1)
f2g2 := int64(f2) * int64(g2)
f2g3 := int64(f2) * int64(g3)
f2g4 := int64(f2) * int64(g4)
f2g5 := int64(f2) * int64(g5)
f2g6 := int64(f2) * int64(g6)
f2g7 := int64(f2) * int64(g7)
f2g8_19 := int64(f2) * int64(g8_19)
f2g9_19 := int64(f2) * int64(g9_19)
f3g0 := int64(f3) * int64(g0)
f3g1_2 := int64(f3_2) * int64(g1)
f3g2 := int64(f3) * int64(g2)
f3g3_2 := int64(f3_2) * int64(g3)
f3g4 := int64(f3) * int64(g4)
f3g5_2 := int64(f3_2) * int64(g5)
f3g6 := int64(f3) * int64(g6)
f3g7_38 := int64(f3_2) * int64(g7_19)
f3g8_19 := int64(f3) * int64(g8_19)
f3g9_38 := int64(f3_2) * int64(g9_19)
f4g0 := int64(f4) * int64(g0)
f4g1 := int64(f4) * int64(g1)
f4g2 := int64(f4) * int64(g2)
f4g3 := int64(f4) * int64(g3)
f4g4 := int64(f4) * int64(g4)
f4g5 := int64(f4) * int64(g5)
f4g6_19 := int64(f4) * int64(g6_19)
f4g7_19 := int64(f4) * int64(g7_19)
f4g8_19 := int64(f4) * int64(g8_19)
f4g9_19 := int64(f4) * int64(g9_19)
f5g0 := int64(f5) * int64(g0)
f5g1_2 := int64(f5_2) * int64(g1)
f5g2 := int64(f5) * int64(g2)
f5g3_2 := int64(f5_2) * int64(g3)
f5g4 := int64(f5) * int64(g4)
f5g5_38 := int64(f5_2) * int64(g5_19)
f5g6_19 := int64(f5) * int64(g6_19)
f5g7_38 := int64(f5_2) * int64(g7_19)
f5g8_19 := int64(f5) * int64(g8_19)
f5g9_38 := int64(f5_2) * int64(g9_19)
f6g0 := int64(f6) * int64(g0)
f6g1 := int64(f6) * int64(g1)
f6g2 := int64(f6) * int64(g2)
f6g3 := int64(f6) * int64(g3)
f6g4_19 := int64(f6) * int64(g4_19)
f6g5_19 := int64(f6) * int64(g5_19)
f6g6_19 := int64(f6) * int64(g6_19)
f6g7_19 := int64(f6) * int64(g7_19)
f6g8_19 := int64(f6) * int64(g8_19)
f6g9_19 := int64(f6) * int64(g9_19)
f7g0 := int64(f7) * int64(g0)
f7g1_2 := int64(f7_2) * int64(g1)
f7g2 := int64(f7) * int64(g2)
f7g3_38 := int64(f7_2) * int64(g3_19)
f7g4_19 := int64(f7) * int64(g4_19)
f7g5_38 := int64(f7_2) * int64(g5_19)
f7g6_19 := int64(f7) * int64(g6_19)
f7g7_38 := int64(f7_2) * int64(g7_19)
f7g8_19 := int64(f7) * int64(g8_19)
f7g9_38 := int64(f7_2) * int64(g9_19)
f8g0 := int64(f8) * int64(g0)
f8g1 := int64(f8) * int64(g1)
f8g2_19 := int64(f8) * int64(g2_19)
f8g3_19 := int64(f8) * int64(g3_19)
f8g4_19 := int64(f8) * int64(g4_19)
f8g5_19 := int64(f8) * int64(g5_19)
f8g6_19 := int64(f8) * int64(g6_19)
f8g7_19 := int64(f8) * int64(g7_19)
f8g8_19 := int64(f8) * int64(g8_19)
f8g9_19 := int64(f8) * int64(g9_19)
f9g0 := int64(f9) * int64(g0)
f9g1_38 := int64(f9_2) * int64(g1_19)
f9g2_19 := int64(f9) * int64(g2_19)
f9g3_38 := int64(f9_2) * int64(g3_19)
f9g4_19 := int64(f9) * int64(g4_19)
f9g5_38 := int64(f9_2) * int64(g5_19)
f9g6_19 := int64(f9) * int64(g6_19)
f9g7_38 := int64(f9_2) * int64(g7_19)
f9g8_19 := int64(f9) * int64(g8_19)
f9g9_38 := int64(f9_2) * int64(g9_19)
h0 := f0g0 + f1g9_38 + f2g8_19 + f3g7_38 + f4g6_19 + f5g5_38 + f6g4_19 + f7g3_38 + f8g2_19 + f9g1_38
h1 := f0g1 + f1g0 + f2g9_19 + f3g8_19 + f4g7_19 + f5g6_19 + f6g5_19 + f7g4_19 + f8g3_19 + f9g2_19
h2 := f0g2 + f1g1_2 + f2g0 + f3g9_38 + f4g8_19 + f5g7_38 + f6g6_19 + f7g5_38 + f8g4_19 + f9g3_38
h3 := f0g3 + f1g2 + f2g1 + f3g0 + f4g9_19 + f5g8_19 + f6g7_19 + f7g6_19 + f8g5_19 + f9g4_19
h4 := f0g4 + f1g3_2 + f2g2 + f3g1_2 + f4g0 + f5g9_38 + f6g8_19 + f7g7_38 + f8g6_19 + f9g5_38
h5 := f0g5 + f1g4 + f2g3 + f3g2 + f4g1 + f5g0 + f6g9_19 + f7g8_19 + f8g7_19 + f9g6_19
h6 := f0g6 + f1g5_2 + f2g4 + f3g3_2 + f4g2 + f5g1_2 + f6g0 + f7g9_38 + f8g8_19 + f9g7_38
h7 := f0g7 + f1g6 + f2g5 + f3g4 + f4g3 + f5g2 + f6g1 + f7g0 + f8g9_19 + f9g8_19
h8 := f0g8 + f1g7_2 + f2g6 + f3g5_2 + f4g4 + f5g3_2 + f6g2 + f7g1_2 + f8g0 + f9g9_38
h9 := f0g9 + f1g8 + f2g7 + f3g6 + f4g5 + f5g4 + f6g3 + f7g2 + f8g1 + f9g0
var carry [10]int64
// |h0| <= (1.1*1.1*2^52*(1+19+19+19+19)+1.1*1.1*2^50*(38+38+38+38+38))
// i.e. |h0| <= 1.2*2^59; narrower ranges for h2, h4, h6, h8
// |h1| <= (1.1*1.1*2^51*(1+1+19+19+19+19+19+19+19+19))
// i.e. |h1| <= 1.5*2^58; narrower ranges for h3, h5, h7, h9
carry[0] = (h0 + (1 << 25)) >> 26
h1 += carry[0]
h0 -= carry[0] << 26
carry[4] = (h4 + (1 << 25)) >> 26
h5 += carry[4]
h4 -= carry[4] << 26
// |h0| <= 2^25
// |h4| <= 2^25
// |h1| <= 1.51*2^58
// |h5| <= 1.51*2^58
carry[1] = (h1 + (1 << 24)) >> 25
h2 += carry[1]
h1 -= carry[1] << 25
carry[5] = (h5 + (1 << 24)) >> 25
h6 += carry[5]
h5 -= carry[5] << 25
// |h1| <= 2^24; from now on fits into int32
// |h5| <= 2^24; from now on fits into int32
// |h2| <= 1.21*2^59
// |h6| <= 1.21*2^59
carry[2] = (h2 + (1 << 25)) >> 26
h3 += carry[2]
h2 -= carry[2] << 26
carry[6] = (h6 + (1 << 25)) >> 26
h7 += carry[6]
h6 -= carry[6] << 26
// |h2| <= 2^25; from now on fits into int32 unchanged
// |h6| <= 2^25; from now on fits into int32 unchanged
// |h3| <= 1.51*2^58
// |h7| <= 1.51*2^58
carry[3] = (h3 + (1 << 24)) >> 25
h4 += carry[3]
h3 -= carry[3] << 25
carry[7] = (h7 + (1 << 24)) >> 25
h8 += carry[7]
h7 -= carry[7] << 25
// |h3| <= 2^24; from now on fits into int32 unchanged
// |h7| <= 2^24; from now on fits into int32 unchanged
// |h4| <= 1.52*2^33
// |h8| <= 1.52*2^33
carry[4] = (h4 + (1 << 25)) >> 26
h5 += carry[4]
h4 -= carry[4] << 26
carry[8] = (h8 + (1 << 25)) >> 26
h9 += carry[8]
h8 -= carry[8] << 26
// |h4| <= 2^25; from now on fits into int32 unchanged
// |h8| <= 2^25; from now on fits into int32 unchanged
// |h5| <= 1.01*2^24
// |h9| <= 1.51*2^58
carry[9] = (h9 + (1 << 24)) >> 25
h0 += carry[9] * 19
h9 -= carry[9] << 25
// |h9| <= 2^24; from now on fits into int32 unchanged
// |h0| <= 1.8*2^37
carry[0] = (h0 + (1 << 25)) >> 26
h1 += carry[0]
h0 -= carry[0] << 26
// |h0| <= 2^25; from now on fits into int32 unchanged
// |h1| <= 1.01*2^24
h[0] = int32(h0)
h[1] = int32(h1)
h[2] = int32(h2)
h[3] = int32(h3)
h[4] = int32(h4)
h[5] = int32(h5)
h[6] = int32(h6)
h[7] = int32(h7)
h[8] = int32(h8)
h[9] = int32(h9)
}
// feSquare calculates h = f*f. Can overlap h with f.
//
// Preconditions:
// |f| bounded by 1.1*2^26,1.1*2^25,1.1*2^26,1.1*2^25,etc.
//
// Postconditions:
// |h| bounded by 1.1*2^25,1.1*2^24,1.1*2^25,1.1*2^24,etc.
func feSquare(h, f *fieldElement) {
f0 := f[0]
f1 := f[1]
f2 := f[2]
f3 := f[3]
f4 := f[4]
f5 := f[5]
f6 := f[6]
f7 := f[7]
f8 := f[8]
f9 := f[9]
f0_2 := 2 * f0
f1_2 := 2 * f1
f2_2 := 2 * f2
f3_2 := 2 * f3
f4_2 := 2 * f4
f5_2 := 2 * f5
f6_2 := 2 * f6
f7_2 := 2 * f7
f5_38 := 38 * f5 // 1.31*2^30
f6_19 := 19 * f6 // 1.31*2^30
f7_38 := 38 * f7 // 1.31*2^30
f8_19 := 19 * f8 // 1.31*2^30
f9_38 := 38 * f9 // 1.31*2^30
f0f0 := int64(f0) * int64(f0)
f0f1_2 := int64(f0_2) * int64(f1)
f0f2_2 := int64(f0_2) * int64(f2)
f0f3_2 := int64(f0_2) * int64(f3)
f0f4_2 := int64(f0_2) * int64(f4)
f0f5_2 := int64(f0_2) * int64(f5)
f0f6_2 := int64(f0_2) * int64(f6)
f0f7_2 := int64(f0_2) * int64(f7)
f0f8_2 := int64(f0_2) * int64(f8)
f0f9_2 := int64(f0_2) * int64(f9)
f1f1_2 := int64(f1_2) * int64(f1)
f1f2_2 := int64(f1_2) * int64(f2)
f1f3_4 := int64(f1_2) * int64(f3_2)
f1f4_2 := int64(f1_2) * int64(f4)
f1f5_4 := int64(f1_2) * int64(f5_2)
f1f6_2 := int64(f1_2) * int64(f6)
f1f7_4 := int64(f1_2) * int64(f7_2)
f1f8_2 := int64(f1_2) * int64(f8)
f1f9_76 := int64(f1_2) * int64(f9_38)
f2f2 := int64(f2) * int64(f2)
f2f3_2 := int64(f2_2) * int64(f3)
f2f4_2 := int64(f2_2) * int64(f4)
f2f5_2 := int64(f2_2) * int64(f5)
f2f6_2 := int64(f2_2) * int64(f6)
f2f7_2 := int64(f2_2) * int64(f7)
f2f8_38 := int64(f2_2) * int64(f8_19)
f2f9_38 := int64(f2) * int64(f9_38)
f3f3_2 := int64(f3_2) * int64(f3)
f3f4_2 := int64(f3_2) * int64(f4)
f3f5_4 := int64(f3_2) * int64(f5_2)
f3f6_2 := int64(f3_2) * int64(f6)
f3f7_76 := int64(f3_2) * int64(f7_38)
f3f8_38 := int64(f3_2) * int64(f8_19)
f3f9_76 := int64(f3_2) * int64(f9_38)
f4f4 := int64(f4) * int64(f4)
f4f5_2 := int64(f4_2) * int64(f5)
f4f6_38 := int64(f4_2) * int64(f6_19)
f4f7_38 := int64(f4) * int64(f7_38)
f4f8_38 := int64(f4_2) * int64(f8_19)
f4f9_38 := int64(f4) * int64(f9_38)
f5f5_38 := int64(f5) * int64(f5_38)
f5f6_38 := int64(f5_2) * int64(f6_19)
f5f7_76 := int64(f5_2) * int64(f7_38)
f5f8_38 := int64(f5_2) * int64(f8_19)
f5f9_76 := int64(f5_2) * int64(f9_38)
f6f6_19 := int64(f6) * int64(f6_19)
f6f7_38 := int64(f6) * int64(f7_38)
f6f8_38 := int64(f6_2) * int64(f8_19)
f6f9_38 := int64(f6) * int64(f9_38)
f7f7_38 := int64(f7) * int64(f7_38)
f7f8_38 := int64(f7_2) * int64(f8_19)
f7f9_76 := int64(f7_2) * int64(f9_38)
f8f8_19 := int64(f8) * int64(f8_19)
f8f9_38 := int64(f8) * int64(f9_38)
f9f9_38 := int64(f9) * int64(f9_38)
h0 := f0f0 + f1f9_76 + f2f8_38 + f3f7_76 + f4f6_38 + f5f5_38
h1 := f0f1_2 + f2f9_38 + f3f8_38 + f4f7_38 + f5f6_38
h2 := f0f2_2 + f1f1_2 + f3f9_76 + f4f8_38 + f5f7_76 + f6f6_19
h3 := f0f3_2 + f1f2_2 + f4f9_38 + f5f8_38 + f6f7_38
h4 := f0f4_2 + f1f3_4 + f2f2 + f5f9_76 + f6f8_38 + f7f7_38
h5 := f0f5_2 + f1f4_2 + f2f3_2 + f6f9_38 + f7f8_38
h6 := f0f6_2 + f1f5_4 + f2f4_2 + f3f3_2 + f7f9_76 + f8f8_19
h7 := f0f7_2 + f1f6_2 + f2f5_2 + f3f4_2 + f8f9_38
h8 := f0f8_2 + f1f7_4 + f2f6_2 + f3f5_4 + f4f4 + f9f9_38
h9 := f0f9_2 + f1f8_2 + f2f7_2 + f3f6_2 + f4f5_2
var carry [10]int64
carry[0] = (h0 + (1 << 25)) >> 26
h1 += carry[0]
h0 -= carry[0] << 26
carry[4] = (h4 + (1 << 25)) >> 26
h5 += carry[4]
h4 -= carry[4] << 26
carry[1] = (h1 + (1 << 24)) >> 25
h2 += carry[1]
h1 -= carry[1] << 25
carry[5] = (h5 + (1 << 24)) >> 25
h6 += carry[5]
h5 -= carry[5] << 25
carry[2] = (h2 + (1 << 25)) >> 26
h3 += carry[2]
h2 -= carry[2] << 26
carry[6] = (h6 + (1 << 25)) >> 26
h7 += carry[6]
h6 -= carry[6] << 26
carry[3] = (h3 + (1 << 24)) >> 25
h4 += carry[3]
h3 -= carry[3] << 25
carry[7] = (h7 + (1 << 24)) >> 25
h8 += carry[7]
h7 -= carry[7] << 25
carry[4] = (h4 + (1 << 25)) >> 26
h5 += carry[4]
h4 -= carry[4] << 26
carry[8] = (h8 + (1 << 25)) >> 26
h9 += carry[8]
h8 -= carry[8] << 26
carry[9] = (h9 + (1 << 24)) >> 25
h0 += carry[9] * 19
h9 -= carry[9] << 25
carry[0] = (h0 + (1 << 25)) >> 26
h1 += carry[0]
h0 -= carry[0] << 26
h[0] = int32(h0)
h[1] = int32(h1)
h[2] = int32(h2)
h[3] = int32(h3)
h[4] = int32(h4)
h[5] = int32(h5)
h[6] = int32(h6)
h[7] = int32(h7)
h[8] = int32(h8)
h[9] = int32(h9)
}
// feMul121666 calculates h = f * 121666. Can overlap h with f.
//
// Preconditions:
// |f| bounded by 1.1*2^26,1.1*2^25,1.1*2^26,1.1*2^25,etc.
//
// Postconditions:
// |h| bounded by 1.1*2^25,1.1*2^24,1.1*2^25,1.1*2^24,etc.
func feMul121666(h, f *fieldElement) {
h0 := int64(f[0]) * 121666
h1 := int64(f[1]) * 121666
h2 := int64(f[2]) * 121666
h3 := int64(f[3]) * 121666
h4 := int64(f[4]) * 121666
h5 := int64(f[5]) * 121666
h6 := int64(f[6]) * 121666
h7 := int64(f[7]) * 121666
h8 := int64(f[8]) * 121666
h9 := int64(f[9]) * 121666
var carry [10]int64
carry[9] = (h9 + (1 << 24)) >> 25
h0 += carry[9] * 19
h9 -= carry[9] << 25
carry[1] = (h1 + (1 << 24)) >> 25
h2 += carry[1]
h1 -= carry[1] << 25
carry[3] = (h3 + (1 << 24)) >> 25
h4 += carry[3]
h3 -= carry[3] << 25
carry[5] = (h5 + (1 << 24)) >> 25
h6 += carry[5]
h5 -= carry[5] << 25
carry[7] = (h7 + (1 << 24)) >> 25
h8 += carry[7]
h7 -= carry[7] << 25
carry[0] = (h0 + (1 << 25)) >> 26
h1 += carry[0]
h0 -= carry[0] << 26
carry[2] = (h2 + (1 << 25)) >> 26
h3 += carry[2]
h2 -= carry[2] << 26
carry[4] = (h4 + (1 << 25)) >> 26
h5 += carry[4]
h4 -= carry[4] << 26
carry[6] = (h6 + (1 << 25)) >> 26
h7 += carry[6]
h6 -= carry[6] << 26
carry[8] = (h8 + (1 << 25)) >> 26
h9 += carry[8]
h8 -= carry[8] << 26
h[0] = int32(h0)
h[1] = int32(h1)
h[2] = int32(h2)
h[3] = int32(h3)
h[4] = int32(h4)
h[5] = int32(h5)
h[6] = int32(h6)
h[7] = int32(h7)
h[8] = int32(h8)
h[9] = int32(h9)
}
// feInvert sets out = z^-1.
func feInvert(out, z *fieldElement) {
var t0, t1, t2, t3 fieldElement
var i int
feSquare(&t0, z)
for i = 1; i < 1; i++ {
feSquare(&t0, &t0)
}
feSquare(&t1, &t0)
for i = 1; i < 2; i++ {
feSquare(&t1, &t1)
}
feMul(&t1, z, &t1)
feMul(&t0, &t0, &t1)
feSquare(&t2, &t0)
for i = 1; i < 1; i++ {
feSquare(&t2, &t2)
}
feMul(&t1, &t1, &t2)
feSquare(&t2, &t1)
for i = 1; i < 5; i++ {
feSquare(&t2, &t2)
}
feMul(&t1, &t2, &t1)
feSquare(&t2, &t1)
for i = 1; i < 10; i++ {
feSquare(&t2, &t2)
}
feMul(&t2, &t2, &t1)
feSquare(&t3, &t2)
for i = 1; i < 20; i++ {
feSquare(&t3, &t3)
}
feMul(&t2, &t3, &t2)
feSquare(&t2, &t2)
for i = 1; i < 10; i++ {
feSquare(&t2, &t2)
}
feMul(&t1, &t2, &t1)
feSquare(&t2, &t1)
for i = 1; i < 50; i++ {
feSquare(&t2, &t2)
}
feMul(&t2, &t2, &t1)
feSquare(&t3, &t2)
for i = 1; i < 100; i++ {
feSquare(&t3, &t3)
}
feMul(&t2, &t3, &t2)
feSquare(&t2, &t2)
for i = 1; i < 50; i++ {
feSquare(&t2, &t2)
}
feMul(&t1, &t2, &t1)
feSquare(&t1, &t1)
for i = 1; i < 5; i++ {
feSquare(&t1, &t1)
}
feMul(out, &t1, &t0)
}
func scalarMult(out, in, base *[32]byte) {
var e [32]byte
copy(e[:], in[:])
e[0] &= 248
e[31] &= 127
e[31] |= 64
var x1, x2, z2, x3, z3, tmp0, tmp1 fieldElement
feFromBytes(&x1, base)
feOne(&x2)
feCopy(&x3, &x1)
feOne(&z3)
swap := int32(0)
for pos := 254; pos >= 0; pos-- {
b := e[pos/8] >> uint(pos&7)
b &= 1
swap ^= int32(b)
feCSwap(&x2, &x3, swap)
feCSwap(&z2, &z3, swap)
swap = int32(b)
feSub(&tmp0, &x3, &z3)
feSub(&tmp1, &x2, &z2)
feAdd(&x2, &x2, &z2)
feAdd(&z2, &x3, &z3)
feMul(&z3, &tmp0, &x2)
feMul(&z2, &z2, &tmp1)
feSquare(&tmp0, &tmp1)
feSquare(&tmp1, &x2)
feAdd(&x3, &z3, &z2)
feSub(&z2, &z3, &z2)
feMul(&x2, &tmp1, &tmp0)
feSub(&tmp1, &tmp1, &tmp0)
feSquare(&z2, &z2)
feMul121666(&z3, &tmp1)
feSquare(&x3, &x3)
feAdd(&tmp0, &tmp0, &z3)
feMul(&z3, &x1, &z2)
feMul(&z2, &tmp1, &tmp0)
}
feCSwap(&x2, &x3, swap)
feCSwap(&z2, &z3, swap)
feInvert(&z2, &z2)
feMul(&x2, &x2, &z2)
feToBytes(out, &x2)
}
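The three mask operations at the top of scalarMult implement the standard curve25519 scalar clamping. A minimal sketch, assuming only the standard library and an illustrative main package, of the same preparation applied to a caller-held private key before it is handed to the exported API:

package main

import (
	"crypto/rand"
	"fmt"
)

func main() {
	// Sketch only: the same clamping scalarMult applies to its own copy of
	// the scalar, shown here on a freshly generated 32-byte private key.
	var priv [32]byte
	if _, err := rand.Read(priv[:]); err != nil {
		panic(err)
	}
	priv[0] &= 248  // clear the low three bits (multiple of the cofactor 8)
	priv[31] &= 127 // clear bit 255
	priv[31] |= 64  // set bit 254, fixing the scalar's bit length
	fmt.Printf("clamped scalar: %x\n", priv)
}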

23
vendor/golang.org/x/crypto/curve25519/doc.go generated vendored Normal file

@ -0,0 +1,23 @@
// Copyright 2012 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package curve25519 provides an implementation of scalar multiplication on
// the elliptic curve known as curve25519. See https://cr.yp.to/ecdh.html
package curve25519 // import "golang.org/x/crypto/curve25519"
// basePoint is the x coordinate of the generator of the curve.
var basePoint = [32]byte{9, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}
// ScalarMult sets dst to the product in*base where dst and base are the x
// coordinates of group points and all values are in little-endian form.
func ScalarMult(dst, in, base *[32]byte) {
scalarMult(dst, in, base)
}
// ScalarBaseMult sets dst to the product in*base where dst and base are the x
// coordinates of group points, base is the standard generator and all values
// are in little-endian form.
func ScalarBaseMult(dst, in *[32]byte) {
ScalarMult(dst, in, &basePoint)
}
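ScalarMult and ScalarBaseMult are the package's entire public surface. A minimal sketch of an X25519-style Diffie-Hellman exchange built on them (the key names and the use of crypto/rand are illustrative, not part of this file); clamping is applied internally by scalarMult, so callers can pass raw random bytes:

package main

import (
	"crypto/rand"
	"fmt"

	"golang.org/x/crypto/curve25519"
)

func main() {
	var aPriv, bPriv, aPub, bPub, aShared, bShared [32]byte
	if _, err := rand.Read(aPriv[:]); err != nil {
		panic(err)
	}
	if _, err := rand.Read(bPriv[:]); err != nil {
		panic(err)
	}

	// Public keys: each private scalar times the standard base point.
	curve25519.ScalarBaseMult(&aPub, &aPriv)
	curve25519.ScalarBaseMult(&bPub, &bPriv)

	// Shared secret: own scalar times the peer's public key.
	curve25519.ScalarMult(&aShared, &aPriv, &bPub)
	curve25519.ScalarMult(&bShared, &bPriv, &aPub)

	fmt.Println(aShared == bShared) // true: both sides agree
}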

73
vendor/golang.org/x/crypto/curve25519/freeze_amd64.s generated vendored Normal file

@ -0,0 +1,73 @@
// Copyright 2012 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// This code was translated into a form compatible with 6a from the public
// domain sources in SUPERCOP: https://bench.cr.yp.to/supercop.html
// +build amd64,!gccgo,!appengine
#include "const_amd64.h"
// func freeze(inout *[5]uint64)
TEXT ·freeze(SB),7,$0-8
MOVQ inout+0(FP), DI
MOVQ 0(DI),SI
MOVQ 8(DI),DX
MOVQ 16(DI),CX
MOVQ 24(DI),R8
MOVQ 32(DI),R9
MOVQ $REDMASK51,AX
MOVQ AX,R10
SUBQ $18,R10
MOVQ $3,R11
REDUCELOOP:
MOVQ SI,R12
SHRQ $51,R12
ANDQ AX,SI
ADDQ R12,DX
MOVQ DX,R12
SHRQ $51,R12
ANDQ AX,DX
ADDQ R12,CX
MOVQ CX,R12
SHRQ $51,R12
ANDQ AX,CX
ADDQ R12,R8
MOVQ R8,R12
SHRQ $51,R12
ANDQ AX,R8
ADDQ R12,R9
MOVQ R9,R12
SHRQ $51,R12
ANDQ AX,R9
IMUL3Q $19,R12,R12
ADDQ R12,SI
SUBQ $1,R11
JA REDUCELOOP
MOVQ $1,R12
CMPQ R10,SI
CMOVQLT R11,R12
CMPQ AX,DX
CMOVQNE R11,R12
CMPQ AX,CX
CMOVQNE R11,R12
CMPQ AX,R8
CMOVQNE R11,R12
CMPQ AX,R9
CMOVQNE R11,R12
NEGQ R12
ANDQ R12,AX
ANDQ R12,R10
SUBQ R10,SI
SUBQ AX,DX
SUBQ AX,CX
SUBQ AX,R8
SUBQ AX,R9
MOVQ SI,0(DI)
MOVQ DX,8(DI)
MOVQ CX,16(DI)
MOVQ R8,24(DI)
MOVQ R9,32(DI)
RET

1377
vendor/golang.org/x/crypto/curve25519/ladderstep_amd64.s generated vendored Normal file

File diff suppressed because it is too large


@ -0,0 +1,240 @@
// Copyright 2012 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build amd64,!gccgo,!appengine
package curve25519
// These functions are implemented in the .s files. The names of the functions
// in the rest of the file are also taken from the SUPERCOP sources to help
// people following along.
//go:noescape
func cswap(inout *[5]uint64, v uint64)
//go:noescape
func ladderstep(inout *[5][5]uint64)
//go:noescape
func freeze(inout *[5]uint64)
//go:noescape
func mul(dest, a, b *[5]uint64)
//go:noescape
func square(out, in *[5]uint64)
// mladder uses a Montgomery ladder to calculate (xr/zr) *= s.
func mladder(xr, zr *[5]uint64, s *[32]byte) {
var work [5][5]uint64
work[0] = *xr
setint(&work[1], 1)
setint(&work[2], 0)
work[3] = *xr
setint(&work[4], 1)
j := uint(6)
var prevbit byte
for i := 31; i >= 0; i-- {
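// j starts at 6 because clamping clears bit 7 and sets bit 6 of s[31], so the
// ladder begins at bit 254. The inner loop below stops via uint underflow:
// once bit 0 has been handled, j-- wraps j past 7 and "j < 8" fails; j is then
// reset to 7 for every remaining byte.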
for j < 8 {
bit := ((*s)[i] >> j) & 1
swap := bit ^ prevbit
prevbit = bit
cswap(&work[1], uint64(swap))
ladderstep(&work)
j--
}
j = 7
}
*xr = work[1]
*zr = work[2]
}
func scalarMult(out, in, base *[32]byte) {
var e [32]byte
copy(e[:], (*in)[:])
e[0] &= 248
e[31] &= 127
e[31] |= 64
var t, z [5]uint64
unpack(&t, base)
mladder(&t, &z, &e)
invert(&z, &z)
mul(&t, &t, &z)
pack(out, &t)
}
func setint(r *[5]uint64, v uint64) {
r[0] = v
r[1] = 0
r[2] = 0
r[3] = 0
r[4] = 0
}
// unpack sets r = x where r consists of 5, 51-bit limbs in little-endian
// order.
func unpack(r *[5]uint64, x *[32]byte) {
r[0] = uint64(x[0]) |
uint64(x[1])<<8 |
uint64(x[2])<<16 |
uint64(x[3])<<24 |
uint64(x[4])<<32 |
uint64(x[5])<<40 |
uint64(x[6]&7)<<48
r[1] = uint64(x[6])>>3 |
uint64(x[7])<<5 |
uint64(x[8])<<13 |
uint64(x[9])<<21 |
uint64(x[10])<<29 |
uint64(x[11])<<37 |
uint64(x[12]&63)<<45
r[2] = uint64(x[12])>>6 |
uint64(x[13])<<2 |
uint64(x[14])<<10 |
uint64(x[15])<<18 |
uint64(x[16])<<26 |
uint64(x[17])<<34 |
uint64(x[18])<<42 |
uint64(x[19]&1)<<50
r[3] = uint64(x[19])>>1 |
uint64(x[20])<<7 |
uint64(x[21])<<15 |
uint64(x[22])<<23 |
uint64(x[23])<<31 |
uint64(x[24])<<39 |
uint64(x[25]&15)<<47
r[4] = uint64(x[25])>>4 |
uint64(x[26])<<4 |
uint64(x[27])<<12 |
uint64(x[28])<<20 |
uint64(x[29])<<28 |
uint64(x[30])<<36 |
uint64(x[31]&127)<<44
}
// pack sets out = x where out is the usual, little-endian form of the 5,
// 51-bit limbs in x.
func pack(out *[32]byte, x *[5]uint64) {
t := *x
freeze(&t)
out[0] = byte(t[0])
out[1] = byte(t[0] >> 8)
out[2] = byte(t[0] >> 16)
out[3] = byte(t[0] >> 24)
out[4] = byte(t[0] >> 32)
out[5] = byte(t[0] >> 40)
out[6] = byte(t[0] >> 48)
out[6] ^= byte(t[1]<<3) & 0xf8
out[7] = byte(t[1] >> 5)
out[8] = byte(t[1] >> 13)
out[9] = byte(t[1] >> 21)
out[10] = byte(t[1] >> 29)
out[11] = byte(t[1] >> 37)
out[12] = byte(t[1] >> 45)
out[12] ^= byte(t[2]<<6) & 0xc0
out[13] = byte(t[2] >> 2)
out[14] = byte(t[2] >> 10)
out[15] = byte(t[2] >> 18)
out[16] = byte(t[2] >> 26)
out[17] = byte(t[2] >> 34)
out[18] = byte(t[2] >> 42)
out[19] = byte(t[2] >> 50)
out[19] ^= byte(t[3]<<1) & 0xfe
out[20] = byte(t[3] >> 7)
out[21] = byte(t[3] >> 15)
out[22] = byte(t[3] >> 23)
out[23] = byte(t[3] >> 31)
out[24] = byte(t[3] >> 39)
out[25] = byte(t[3] >> 47)
out[25] ^= byte(t[4]<<4) & 0xf0
out[26] = byte(t[4] >> 4)
out[27] = byte(t[4] >> 12)
out[28] = byte(t[4] >> 20)
out[29] = byte(t[4] >> 28)
out[30] = byte(t[4] >> 36)
out[31] = byte(t[4] >> 44)
}
// invert calculates r = x^-1 mod p using Fermat's little theorem.
func invert(r *[5]uint64, x *[5]uint64) {
var z2, z9, z11, z2_5_0, z2_10_0, z2_20_0, z2_50_0, z2_100_0, t [5]uint64
square(&z2, x) /* 2 */
square(&t, &z2) /* 4 */
square(&t, &t) /* 8 */
mul(&z9, &t, x) /* 9 */
mul(&z11, &z9, &z2) /* 11 */
square(&t, &z11) /* 22 */
mul(&z2_5_0, &t, &z9) /* 2^5 - 2^0 = 31 */
square(&t, &z2_5_0) /* 2^6 - 2^1 */
for i := 1; i < 5; i++ { /* 2^10 - 2^5 */
square(&t, &t)
}
mul(&z2_10_0, &t, &z2_5_0) /* 2^10 - 2^0 */
square(&t, &z2_10_0) /* 2^11 - 2^1 */
for i := 1; i < 10; i++ { /* 2^20 - 2^10 */
square(&t, &t)
}
mul(&z2_20_0, &t, &z2_10_0) /* 2^20 - 2^0 */
square(&t, &z2_20_0) /* 2^21 - 2^1 */
for i := 1; i < 20; i++ { /* 2^40 - 2^20 */
square(&t, &t)
}
mul(&t, &t, &z2_20_0) /* 2^40 - 2^0 */
square(&t, &t) /* 2^41 - 2^1 */
for i := 1; i < 10; i++ { /* 2^50 - 2^10 */
square(&t, &t)
}
mul(&z2_50_0, &t, &z2_10_0) /* 2^50 - 2^0 */
square(&t, &z2_50_0) /* 2^51 - 2^1 */
for i := 1; i < 50; i++ { /* 2^100 - 2^50 */
square(&t, &t)
}
mul(&z2_100_0, &t, &z2_50_0) /* 2^100 - 2^0 */
square(&t, &z2_100_0) /* 2^101 - 2^1 */
for i := 1; i < 100; i++ { /* 2^200 - 2^100 */
square(&t, &t)
}
mul(&t, &t, &z2_100_0) /* 2^200 - 2^0 */
square(&t, &t) /* 2^201 - 2^1 */
for i := 1; i < 50; i++ { /* 2^250 - 2^50 */
square(&t, &t)
}
mul(&t, &t, &z2_50_0) /* 2^250 - 2^0 */
square(&t, &t) /* 2^251 - 2^1 */
square(&t, &t) /* 2^252 - 2^2 */
square(&t, &t) /* 2^253 - 2^3 */
square(&t, &t) /* 2^254 - 2^4 */
square(&t, &t) /* 2^255 - 2^5 */
mul(r, &t, &z11) /* 2^255 - 21 */
}
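The addition chain above raises x to 2^255 - 21, which equals p - 2 for p = 2^255 - 19; by Fermat's little theorem that power is the inverse. A hedged cross-check of the exponent identity and the inverse property using math/big (illustrative only, not part of the vendored code):

package main

import (
	"fmt"
	"math/big"
)

func main() {
	one := big.NewInt(1)
	p := new(big.Int).Sub(new(big.Int).Lsh(one, 255), big.NewInt(19))   // 2^255 - 19
	exp := new(big.Int).Sub(new(big.Int).Lsh(one, 255), big.NewInt(21)) // 2^255 - 21
	fmt.Println(exp.Cmp(new(big.Int).Sub(p, big.NewInt(2))) == 0)       // true: exponent is p-2

	x := big.NewInt(123456789)
	viaFermat := new(big.Int).Exp(x, exp, p)
	viaModInverse := new(big.Int).ModInverse(x, p)
	fmt.Println(viaFermat.Cmp(viaModInverse) == 0) // true: x^(p-2) mod p is x^-1 mod p
}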

169
vendor/golang.org/x/crypto/curve25519/mul_amd64.s generated vendored Normal file

@ -0,0 +1,169 @@
// Copyright 2012 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// This code was translated into a form compatible with 6a from the public
// domain sources in SUPERCOP: https://bench.cr.yp.to/supercop.html
// +build amd64,!gccgo,!appengine
#include "const_amd64.h"
// func mul(dest, a, b *[5]uint64)
TEXT ·mul(SB),0,$16-24
MOVQ dest+0(FP), DI
MOVQ a+8(FP), SI
MOVQ b+16(FP), DX
MOVQ DX,CX
MOVQ 24(SI),DX
IMUL3Q $19,DX,AX
MOVQ AX,0(SP)
MULQ 16(CX)
MOVQ AX,R8
MOVQ DX,R9
MOVQ 32(SI),DX
IMUL3Q $19,DX,AX
MOVQ AX,8(SP)
MULQ 8(CX)
ADDQ AX,R8
ADCQ DX,R9
MOVQ 0(SI),AX
MULQ 0(CX)
ADDQ AX,R8
ADCQ DX,R9
MOVQ 0(SI),AX
MULQ 8(CX)
MOVQ AX,R10
MOVQ DX,R11
MOVQ 0(SI),AX
MULQ 16(CX)
MOVQ AX,R12
MOVQ DX,R13
MOVQ 0(SI),AX
MULQ 24(CX)
MOVQ AX,R14
MOVQ DX,R15
MOVQ 0(SI),AX
MULQ 32(CX)
MOVQ AX,BX
MOVQ DX,BP
MOVQ 8(SI),AX
MULQ 0(CX)
ADDQ AX,R10
ADCQ DX,R11
MOVQ 8(SI),AX
MULQ 8(CX)
ADDQ AX,R12
ADCQ DX,R13
MOVQ 8(SI),AX
MULQ 16(CX)
ADDQ AX,R14
ADCQ DX,R15
MOVQ 8(SI),AX
MULQ 24(CX)
ADDQ AX,BX
ADCQ DX,BP
MOVQ 8(SI),DX
IMUL3Q $19,DX,AX
MULQ 32(CX)
ADDQ AX,R8
ADCQ DX,R9
MOVQ 16(SI),AX
MULQ 0(CX)
ADDQ AX,R12
ADCQ DX,R13
MOVQ 16(SI),AX
MULQ 8(CX)
ADDQ AX,R14
ADCQ DX,R15
MOVQ 16(SI),AX
MULQ 16(CX)
ADDQ AX,BX
ADCQ DX,BP
MOVQ 16(SI),DX
IMUL3Q $19,DX,AX
MULQ 24(CX)
ADDQ AX,R8
ADCQ DX,R9
MOVQ 16(SI),DX
IMUL3Q $19,DX,AX
MULQ 32(CX)
ADDQ AX,R10
ADCQ DX,R11
MOVQ 24(SI),AX
MULQ 0(CX)
ADDQ AX,R14
ADCQ DX,R15
MOVQ 24(SI),AX
MULQ 8(CX)
ADDQ AX,BX
ADCQ DX,BP
MOVQ 0(SP),AX
MULQ 24(CX)
ADDQ AX,R10
ADCQ DX,R11
MOVQ 0(SP),AX
MULQ 32(CX)
ADDQ AX,R12
ADCQ DX,R13
MOVQ 32(SI),AX
MULQ 0(CX)
ADDQ AX,BX
ADCQ DX,BP
MOVQ 8(SP),AX
MULQ 16(CX)
ADDQ AX,R10
ADCQ DX,R11
MOVQ 8(SP),AX
MULQ 24(CX)
ADDQ AX,R12
ADCQ DX,R13
MOVQ 8(SP),AX
MULQ 32(CX)
ADDQ AX,R14
ADCQ DX,R15
MOVQ $REDMASK51,SI
SHLQ $13,R9:R8
ANDQ SI,R8
SHLQ $13,R11:R10
ANDQ SI,R10
ADDQ R9,R10
SHLQ $13,R13:R12
ANDQ SI,R12
ADDQ R11,R12
SHLQ $13,R15:R14
ANDQ SI,R14
ADDQ R13,R14
SHLQ $13,BP:BX
ANDQ SI,BX
ADDQ R15,BX
IMUL3Q $19,BP,DX
ADDQ DX,R8
MOVQ R8,DX
SHRQ $51,DX
ADDQ R10,DX
MOVQ DX,CX
SHRQ $51,DX
ANDQ SI,R8
ADDQ R12,DX
MOVQ DX,R9
SHRQ $51,DX
ANDQ SI,CX
ADDQ R14,DX
MOVQ DX,AX
SHRQ $51,DX
ANDQ SI,R9
ADDQ BX,DX
MOVQ DX,R10
SHRQ $51,DX
ANDQ SI,AX
IMUL3Q $19,DX,DX
ADDQ DX,R8
ANDQ SI,R10
MOVQ R8,0(DI)
MOVQ CX,8(DI)
MOVQ R9,16(DI)
MOVQ AX,24(DI)
MOVQ R10,32(DI)
RET

132
vendor/golang.org/x/crypto/curve25519/square_amd64.s generated vendored Normal file

@ -0,0 +1,132 @@
// Copyright 2012 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// This code was translated into a form compatible with 6a from the public
// domain sources in SUPERCOP: https://bench.cr.yp.to/supercop.html
// +build amd64,!gccgo,!appengine
#include "const_amd64.h"
// func square(out, in *[5]uint64)
TEXT ·square(SB),7,$0-16
MOVQ out+0(FP), DI
MOVQ in+8(FP), SI
MOVQ 0(SI),AX
MULQ 0(SI)
MOVQ AX,CX
MOVQ DX,R8
MOVQ 0(SI),AX
SHLQ $1,AX
MULQ 8(SI)
MOVQ AX,R9
MOVQ DX,R10
MOVQ 0(SI),AX
SHLQ $1,AX
MULQ 16(SI)
MOVQ AX,R11
MOVQ DX,R12
MOVQ 0(SI),AX
SHLQ $1,AX
MULQ 24(SI)
MOVQ AX,R13
MOVQ DX,R14
MOVQ 0(SI),AX
SHLQ $1,AX
MULQ 32(SI)
MOVQ AX,R15
MOVQ DX,BX
MOVQ 8(SI),AX
MULQ 8(SI)
ADDQ AX,R11
ADCQ DX,R12
MOVQ 8(SI),AX
SHLQ $1,AX
MULQ 16(SI)
ADDQ AX,R13
ADCQ DX,R14
MOVQ 8(SI),AX
SHLQ $1,AX
MULQ 24(SI)
ADDQ AX,R15
ADCQ DX,BX
MOVQ 8(SI),DX
IMUL3Q $38,DX,AX
MULQ 32(SI)
ADDQ AX,CX
ADCQ DX,R8
MOVQ 16(SI),AX
MULQ 16(SI)
ADDQ AX,R15
ADCQ DX,BX
MOVQ 16(SI),DX
IMUL3Q $38,DX,AX
MULQ 24(SI)
ADDQ AX,CX
ADCQ DX,R8
MOVQ 16(SI),DX
IMUL3Q $38,DX,AX
MULQ 32(SI)
ADDQ AX,R9
ADCQ DX,R10
MOVQ 24(SI),DX
IMUL3Q $19,DX,AX
MULQ 24(SI)
ADDQ AX,R9
ADCQ DX,R10
MOVQ 24(SI),DX
IMUL3Q $38,DX,AX
MULQ 32(SI)
ADDQ AX,R11
ADCQ DX,R12
MOVQ 32(SI),DX
IMUL3Q $19,DX,AX
MULQ 32(SI)
ADDQ AX,R13
ADCQ DX,R14
MOVQ $REDMASK51,SI
SHLQ $13,R8:CX
ANDQ SI,CX
SHLQ $13,R10:R9
ANDQ SI,R9
ADDQ R8,R9
SHLQ $13,R12:R11
ANDQ SI,R11
ADDQ R10,R11
SHLQ $13,R14:R13
ANDQ SI,R13
ADDQ R12,R13
SHLQ $13,BX:R15
ANDQ SI,R15
ADDQ R14,R15
IMUL3Q $19,BX,DX
ADDQ DX,CX
MOVQ CX,DX
SHRQ $51,DX
ADDQ R9,DX
ANDQ SI,CX
MOVQ DX,R8
SHRQ $51,DX
ADDQ R11,DX
ANDQ SI,R8
MOVQ DX,R9
SHRQ $51,DX
ADDQ R13,DX
ANDQ SI,R9
MOVQ DX,AX
SHRQ $51,DX
ADDQ R15,DX
ANDQ SI,AX
MOVQ DX,R10
SHRQ $51,DX
IMUL3Q $19,DX,DX
ADDQ DX,CX
ANDQ SI,R10
MOVQ CX,0(DI)
MOVQ R8,8(DI)
MOVQ R9,16(DI)
MOVQ AX,24(DI)
MOVQ R10,32(DI)
RET

951
vendor/golang.org/x/crypto/ssh/terminal/terminal.go generated vendored Normal file

@ -0,0 +1,951 @@
// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package terminal
import (
"bytes"
"io"
"sync"
"unicode/utf8"
)
// EscapeCodes contains escape sequences that can be written to the terminal in
// order to achieve different styles of text.
type EscapeCodes struct {
// Foreground colors
Black, Red, Green, Yellow, Blue, Magenta, Cyan, White []byte
// Reset all attributes
Reset []byte
}
var vt100EscapeCodes = EscapeCodes{
Black: []byte{keyEscape, '[', '3', '0', 'm'},
Red: []byte{keyEscape, '[', '3', '1', 'm'},
Green: []byte{keyEscape, '[', '3', '2', 'm'},
Yellow: []byte{keyEscape, '[', '3', '3', 'm'},
Blue: []byte{keyEscape, '[', '3', '4', 'm'},
Magenta: []byte{keyEscape, '[', '3', '5', 'm'},
Cyan: []byte{keyEscape, '[', '3', '6', 'm'},
White: []byte{keyEscape, '[', '3', '7', 'm'},
Reset: []byte{keyEscape, '[', '0', 'm'},
}
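Since *Terminal implements io.Writer (see the Write method further down), these escape codes can be written straight through it for styled output. A minimal sketch; the package name termexample and the helper warn are illustrative assumptions:

package termexample

import (
	"fmt"

	"golang.org/x/crypto/ssh/terminal"
)

// warn prints msg in red, then resets the attributes. Terminal.Write converts
// the trailing \n to \r\n, and the escape slices may simply be empty on
// terminals that do not support colour.
func warn(t *terminal.Terminal, msg string) {
	fmt.Fprintf(t, "%s%s%s\n", t.Escape.Red, msg, t.Escape.Reset)
}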
// Terminal contains the state for running a VT100 terminal that is capable of
// reading lines of input.
type Terminal struct {
// AutoCompleteCallback, if non-null, is called for each keypress with
// the full input line and the current position of the cursor (in
// bytes, as an index into |line|). If it returns ok=false, the key
// press is processed normally. Otherwise it returns a replacement line
// and the new cursor position.
AutoCompleteCallback func(line string, pos int, key rune) (newLine string, newPos int, ok bool)
// Escape contains a pointer to the escape codes for this terminal.
// It's always a valid pointer, although the escape codes themselves
// may be empty if the terminal doesn't support them.
Escape *EscapeCodes
// lock protects the terminal and the state in this object from
// concurrent processing of a key press and a Write() call.
lock sync.Mutex
c io.ReadWriter
prompt []rune
// line is the current line being entered.
line []rune
// pos is the logical position of the cursor in line
pos int
// echo is true if local echo is enabled
echo bool
// pasteActive is true iff there is a bracketed paste operation in
// progress.
pasteActive bool
// cursorX contains the current X value of the cursor where the left
// edge is 0. cursorY contains the row number where the first row of
// the current line is 0.
cursorX, cursorY int
// maxLine is the greatest value of cursorY so far.
maxLine int
termWidth, termHeight int
// outBuf contains the terminal data to be sent.
outBuf []byte
// remainder contains the remainder of any partial key sequences after
// a read. It aliases into inBuf.
remainder []byte
inBuf [256]byte
// history contains previously entered commands so that they can be
// accessed with the up and down keys.
history stRingBuffer
// historyIndex stores the currently accessed history entry, where zero
// means the immediately previous entry.
historyIndex int
// When navigating up and down the history it's possible to return to
// the incomplete, initial line. That value is stored in
// historyPending.
historyPending string
}
// NewTerminal runs a VT100 terminal on the given ReadWriter. If the ReadWriter is
// a local terminal, that terminal must first have been put into raw mode.
// prompt is a string that is written at the start of each input line (i.e.
// "> ").
func NewTerminal(c io.ReadWriter, prompt string) *Terminal {
return &Terminal{
Escape: &vt100EscapeCodes,
c: c,
prompt: []rune(prompt),
termWidth: 80,
termHeight: 24,
echo: true,
historyIndex: -1,
}
}
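A minimal usage sketch: put the controlling tty into raw mode with this package's MakeRaw/Restore helpers (defined in its util files, outside this diff), combine stdin and stdout into one ReadWriter, and read lines until Ctrl-D. The variable names are illustrative:

package main

import (
	"fmt"
	"io"
	"os"

	"golang.org/x/crypto/ssh/terminal"
)

func main() {
	oldState, err := terminal.MakeRaw(int(os.Stdin.Fd()))
	if err != nil {
		panic(err)
	}
	defer terminal.Restore(int(os.Stdin.Fd()), oldState)

	// NewTerminal wants a single io.ReadWriter, so pair stdin with stdout.
	screen := struct {
		io.Reader
		io.Writer
	}{os.Stdin, os.Stdout}

	t := terminal.NewTerminal(screen, "> ")
	for {
		line, err := t.ReadLine()
		if err == io.EOF {
			return // Ctrl-D on an empty line
		}
		if err != nil {
			panic(err)
		}
		fmt.Fprintln(t, "you typed:", line)
	}
}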
const (
keyCtrlD = 4
keyCtrlU = 21
keyEnter = '\r'
keyEscape = 27
keyBackspace = 127
keyUnknown = 0xd800 /* UTF-16 surrogate area */ + iota
keyUp
keyDown
keyLeft
keyRight
keyAltLeft
keyAltRight
keyHome
keyEnd
keyDeleteWord
keyDeleteLine
keyClearScreen
keyPasteStart
keyPasteEnd
)
var (
crlf = []byte{'\r', '\n'}
pasteStart = []byte{keyEscape, '[', '2', '0', '0', '~'}
pasteEnd = []byte{keyEscape, '[', '2', '0', '1', '~'}
)
// bytesToKey tries to parse a key sequence from b. If successful, it returns
// the key and the remainder of the input. Otherwise it returns utf8.RuneError.
func bytesToKey(b []byte, pasteActive bool) (rune, []byte) {
if len(b) == 0 {
return utf8.RuneError, nil
}
if !pasteActive {
switch b[0] {
case 1: // ^A
return keyHome, b[1:]
case 5: // ^E
return keyEnd, b[1:]
case 8: // ^H
return keyBackspace, b[1:]
case 11: // ^K
return keyDeleteLine, b[1:]
case 12: // ^L
return keyClearScreen, b[1:]
case 23: // ^W
return keyDeleteWord, b[1:]
}
}
if b[0] != keyEscape {
if !utf8.FullRune(b) {
return utf8.RuneError, b
}
r, l := utf8.DecodeRune(b)
return r, b[l:]
}
if !pasteActive && len(b) >= 3 && b[0] == keyEscape && b[1] == '[' {
switch b[2] {
case 'A':
return keyUp, b[3:]
case 'B':
return keyDown, b[3:]
case 'C':
return keyRight, b[3:]
case 'D':
return keyLeft, b[3:]
case 'H':
return keyHome, b[3:]
case 'F':
return keyEnd, b[3:]
}
}
if !pasteActive && len(b) >= 6 && b[0] == keyEscape && b[1] == '[' && b[2] == '1' && b[3] == ';' && b[4] == '3' {
switch b[5] {
case 'C':
return keyAltRight, b[6:]
case 'D':
return keyAltLeft, b[6:]
}
}
if !pasteActive && len(b) >= 6 && bytes.Equal(b[:6], pasteStart) {
return keyPasteStart, b[6:]
}
if pasteActive && len(b) >= 6 && bytes.Equal(b[:6], pasteEnd) {
return keyPasteEnd, b[6:]
}
// If we get here then we have a key that we don't recognise, or a
// partial sequence. It's not clear how one should find the end of a
// sequence without knowing them all, but it seems that [a-zA-Z~] only
// appears at the end of a sequence.
for i, c := range b[0:] {
if c >= 'a' && c <= 'z' || c >= 'A' && c <= 'Z' || c == '~' {
return keyUnknown, b[i+1:]
}
}
return utf8.RuneError, b
}
// queue appends data to the end of t.outBuf
func (t *Terminal) queue(data []rune) {
t.outBuf = append(t.outBuf, []byte(string(data))...)
}
var eraseUnderCursor = []rune{' ', keyEscape, '[', 'D'}
var space = []rune{' '}
func isPrintable(key rune) bool {
isInSurrogateArea := key >= 0xd800 && key <= 0xdbff
return key >= 32 && !isInSurrogateArea
}
// moveCursorToPos appends data to t.outBuf which will move the cursor to the
// given, logical position in the text.
func (t *Terminal) moveCursorToPos(pos int) {
if !t.echo {
return
}
x := visualLength(t.prompt) + pos
y := x / t.termWidth
x = x % t.termWidth
up := 0
if y < t.cursorY {
up = t.cursorY - y
}
down := 0
if y > t.cursorY {
down = y - t.cursorY
}
left := 0
if x < t.cursorX {
left = t.cursorX - x
}
right := 0
if x > t.cursorX {
right = x - t.cursorX
}
t.cursorX = x
t.cursorY = y
t.move(up, down, left, right)
}
func (t *Terminal) move(up, down, left, right int) {
movement := make([]rune, 3*(up+down+left+right))
m := movement
for i := 0; i < up; i++ {
m[0] = keyEscape
m[1] = '['
m[2] = 'A'
m = m[3:]
}
for i := 0; i < down; i++ {
m[0] = keyEscape
m[1] = '['
m[2] = 'B'
m = m[3:]
}
for i := 0; i < left; i++ {
m[0] = keyEscape
m[1] = '['
m[2] = 'D'
m = m[3:]
}
for i := 0; i < right; i++ {
m[0] = keyEscape
m[1] = '['
m[2] = 'C'
m = m[3:]
}
t.queue(movement)
}
func (t *Terminal) clearLineToRight() {
op := []rune{keyEscape, '[', 'K'}
t.queue(op)
}
const maxLineLength = 4096
func (t *Terminal) setLine(newLine []rune, newPos int) {
if t.echo {
t.moveCursorToPos(0)
t.writeLine(newLine)
for i := len(newLine); i < len(t.line); i++ {
t.writeLine(space)
}
t.moveCursorToPos(newPos)
}
t.line = newLine
t.pos = newPos
}
func (t *Terminal) advanceCursor(places int) {
t.cursorX += places
t.cursorY += t.cursorX / t.termWidth
if t.cursorY > t.maxLine {
t.maxLine = t.cursorY
}
t.cursorX = t.cursorX % t.termWidth
if places > 0 && t.cursorX == 0 {
// Normally terminals will advance the current position
// when writing a character. But that doesn't happen
// for the last character in a line. However, when
// writing a character (except a new line) that causes
// a line wrap, the position will be advanced two
// places.
//
// So, if we are stopping at the end of a line, we
// need to write a newline so that our cursor can be
// advanced to the next line.
t.outBuf = append(t.outBuf, '\r', '\n')
}
}
func (t *Terminal) eraseNPreviousChars(n int) {
if n == 0 {
return
}
if t.pos < n {
n = t.pos
}
t.pos -= n
t.moveCursorToPos(t.pos)
copy(t.line[t.pos:], t.line[n+t.pos:])
t.line = t.line[:len(t.line)-n]
if t.echo {
t.writeLine(t.line[t.pos:])
for i := 0; i < n; i++ {
t.queue(space)
}
t.advanceCursor(n)
t.moveCursorToPos(t.pos)
}
}
// countToLeftWord returns the number of characters from the cursor to the
// start of the previous word.
func (t *Terminal) countToLeftWord() int {
if t.pos == 0 {
return 0
}
pos := t.pos - 1
for pos > 0 {
if t.line[pos] != ' ' {
break
}
pos--
}
for pos > 0 {
if t.line[pos] == ' ' {
pos++
break
}
pos--
}
return t.pos - pos
}
// countToRightWord returns the number of characters from the cursor to the
// start of the next word.
func (t *Terminal) countToRightWord() int {
pos := t.pos
for pos < len(t.line) {
if t.line[pos] == ' ' {
break
}
pos++
}
for pos < len(t.line) {
if t.line[pos] != ' ' {
break
}
pos++
}
return pos - t.pos
}
// visualLength returns the number of visible glyphs in runes.
func visualLength(runes []rune) int {
inEscapeSeq := false
length := 0
for _, r := range runes {
switch {
case inEscapeSeq:
if (r >= 'a' && r <= 'z') || (r >= 'A' && r <= 'Z') {
inEscapeSeq = false
}
case r == '\x1b':
inEscapeSeq = true
default:
length++
}
}
return length
}
// handleKey processes the given key and, optionally, returns a line of text
// that the user has entered.
func (t *Terminal) handleKey(key rune) (line string, ok bool) {
if t.pasteActive && key != keyEnter {
t.addKeyToLine(key)
return
}
switch key {
case keyBackspace:
if t.pos == 0 {
return
}
t.eraseNPreviousChars(1)
case keyAltLeft:
// move left by a word.
t.pos -= t.countToLeftWord()
t.moveCursorToPos(t.pos)
case keyAltRight:
// move right by a word.
t.pos += t.countToRightWord()
t.moveCursorToPos(t.pos)
case keyLeft:
if t.pos == 0 {
return
}
t.pos--
t.moveCursorToPos(t.pos)
case keyRight:
if t.pos == len(t.line) {
return
}
t.pos++
t.moveCursorToPos(t.pos)
case keyHome:
if t.pos == 0 {
return
}
t.pos = 0
t.moveCursorToPos(t.pos)
case keyEnd:
if t.pos == len(t.line) {
return
}
t.pos = len(t.line)
t.moveCursorToPos(t.pos)
case keyUp:
entry, ok := t.history.NthPreviousEntry(t.historyIndex + 1)
if !ok {
return "", false
}
if t.historyIndex == -1 {
t.historyPending = string(t.line)
}
t.historyIndex++
runes := []rune(entry)
t.setLine(runes, len(runes))
case keyDown:
switch t.historyIndex {
case -1:
return
case 0:
runes := []rune(t.historyPending)
t.setLine(runes, len(runes))
t.historyIndex--
default:
entry, ok := t.history.NthPreviousEntry(t.historyIndex - 1)
if ok {
t.historyIndex--
runes := []rune(entry)
t.setLine(runes, len(runes))
}
}
case keyEnter:
t.moveCursorToPos(len(t.line))
t.queue([]rune("\r\n"))
line = string(t.line)
ok = true
t.line = t.line[:0]
t.pos = 0
t.cursorX = 0
t.cursorY = 0
t.maxLine = 0
case keyDeleteWord:
// Delete zero or more spaces and then one or more characters.
t.eraseNPreviousChars(t.countToLeftWord())
case keyDeleteLine:
// Delete everything from the current cursor position to the
// end of line.
for i := t.pos; i < len(t.line); i++ {
t.queue(space)
t.advanceCursor(1)
}
t.line = t.line[:t.pos]
t.moveCursorToPos(t.pos)
case keyCtrlD:
// Erase the character under the current position.
// The EOF case when the line is empty is handled in
// readLine().
if t.pos < len(t.line) {
t.pos++
t.eraseNPreviousChars(1)
}
case keyCtrlU:
t.eraseNPreviousChars(t.pos)
case keyClearScreen:
// Erases the screen and moves the cursor to the home position.
t.queue([]rune("\x1b[2J\x1b[H"))
t.queue(t.prompt)
t.cursorX, t.cursorY = 0, 0
t.advanceCursor(visualLength(t.prompt))
t.setLine(t.line, t.pos)
default:
if t.AutoCompleteCallback != nil {
prefix := string(t.line[:t.pos])
suffix := string(t.line[t.pos:])
t.lock.Unlock()
newLine, newPos, completeOk := t.AutoCompleteCallback(prefix+suffix, len(prefix), key)
t.lock.Lock()
if completeOk {
t.setLine([]rune(newLine), utf8.RuneCount([]byte(newLine)[:newPos]))
return
}
}
if !isPrintable(key) {
return
}
if len(t.line) == maxLineLength {
return
}
t.addKeyToLine(key)
}
return
}
// addKeyToLine inserts the given key at the current position in the current
// line.
func (t *Terminal) addKeyToLine(key rune) {
if len(t.line) == cap(t.line) {
newLine := make([]rune, len(t.line), 2*(1+len(t.line)))
copy(newLine, t.line)
t.line = newLine
}
t.line = t.line[:len(t.line)+1]
copy(t.line[t.pos+1:], t.line[t.pos:])
t.line[t.pos] = key
if t.echo {
t.writeLine(t.line[t.pos:])
}
t.pos++
t.moveCursorToPos(t.pos)
}
func (t *Terminal) writeLine(line []rune) {
for len(line) != 0 {
remainingOnLine := t.termWidth - t.cursorX
todo := len(line)
if todo > remainingOnLine {
todo = remainingOnLine
}
t.queue(line[:todo])
t.advanceCursor(visualLength(line[:todo]))
line = line[todo:]
}
}
// writeWithCRLF writes buf to w but replaces all occurrences of \n with \r\n.
func writeWithCRLF(w io.Writer, buf []byte) (n int, err error) {
for len(buf) > 0 {
i := bytes.IndexByte(buf, '\n')
todo := len(buf)
if i >= 0 {
todo = i
}
var nn int
nn, err = w.Write(buf[:todo])
n += nn
if err != nil {
return n, err
}
buf = buf[todo:]
if i >= 0 {
if _, err = w.Write(crlf); err != nil {
return n, err
}
n++
buf = buf[1:]
}
}
return n, nil
}
func (t *Terminal) Write(buf []byte) (n int, err error) {
t.lock.Lock()
defer t.lock.Unlock()
if t.cursorX == 0 && t.cursorY == 0 {
// This is the easy case: there's nothing on the screen that we
// have to move out of the way.
return writeWithCRLF(t.c, buf)
}
// We have a prompt and possibly user input on the screen. We
// have to clear it first.
t.move(0 /* up */, 0 /* down */, t.cursorX /* left */, 0 /* right */)
t.cursorX = 0
t.clearLineToRight()
for t.cursorY > 0 {
t.move(1 /* up */, 0, 0, 0)
t.cursorY--
t.clearLineToRight()
}
if _, err = t.c.Write(t.outBuf); err != nil {
return
}
t.outBuf = t.outBuf[:0]
if n, err = writeWithCRLF(t.c, buf); err != nil {
return
}
t.writeLine(t.prompt)
if t.echo {
t.writeLine(t.line)
}
t.moveCursorToPos(t.pos)
if _, err = t.c.Write(t.outBuf); err != nil {
return
}
t.outBuf = t.outBuf[:0]
return
}
// ReadPassword temporarily changes the prompt and reads a password, without
// echo, from the terminal.
func (t *Terminal) ReadPassword(prompt string) (line string, err error) {
t.lock.Lock()
defer t.lock.Unlock()
oldPrompt := t.prompt
t.prompt = []rune(prompt)
t.echo = false
line, err = t.readLine()
t.prompt = oldPrompt
t.echo = true
return
}
// ReadLine returns a line of input from the terminal.
func (t *Terminal) ReadLine() (line string, err error) {
t.lock.Lock()
defer t.lock.Unlock()
return t.readLine()
}
func (t *Terminal) readLine() (line string, err error) {
// t.lock must be held at this point
if t.cursorX == 0 && t.cursorY == 0 {
t.writeLine(t.prompt)
t.c.Write(t.outBuf)
t.outBuf = t.outBuf[:0]
}
lineIsPasted := t.pasteActive
for {
rest := t.remainder
lineOk := false
for !lineOk {
var key rune
key, rest = bytesToKey(rest, t.pasteActive)
if key == utf8.RuneError {
break
}
if !t.pasteActive {
if key == keyCtrlD {
if len(t.line) == 0 {
return "", io.EOF
}
}
if key == keyPasteStart {
t.pasteActive = true
if len(t.line) == 0 {
lineIsPasted = true
}
continue
}
} else if key == keyPasteEnd {
t.pasteActive = false
continue
}
if !t.pasteActive {
lineIsPasted = false
}
line, lineOk = t.handleKey(key)
}
if len(rest) > 0 {
n := copy(t.inBuf[:], rest)
t.remainder = t.inBuf[:n]
} else {
t.remainder = nil
}
t.c.Write(t.outBuf)
t.outBuf = t.outBuf[:0]
if lineOk {
if t.echo {
t.historyIndex = -1
t.history.Add(line)
}
if lineIsPasted {
err = ErrPasteIndicator
}
return
}
// t.remainder is a slice at the beginning of t.inBuf
// containing a partial key sequence
readBuf := t.inBuf[len(t.remainder):]
var n int
t.lock.Unlock()
n, err = t.c.Read(readBuf)
t.lock.Lock()
if err != nil {
return
}
t.remainder = t.inBuf[:n+len(t.remainder)]
}
}
// SetPrompt sets the prompt to be used when reading subsequent lines.
func (t *Terminal) SetPrompt(prompt string) {
t.lock.Lock()
defer t.lock.Unlock()
t.prompt = []rune(prompt)
}
func (t *Terminal) clearAndRepaintLinePlusNPrevious(numPrevLines int) {
// Move cursor to column zero at the start of the line.
t.move(t.cursorY, 0, t.cursorX, 0)
t.cursorX, t.cursorY = 0, 0
t.clearLineToRight()
for t.cursorY < numPrevLines {
// Move down a line
t.move(0, 1, 0, 0)
t.cursorY++
t.clearLineToRight()
}
// Move back to beginning.
t.move(t.cursorY, 0, 0, 0)
t.cursorX, t.cursorY = 0, 0
t.queue(t.prompt)
t.advanceCursor(visualLength(t.prompt))
t.writeLine(t.line)
t.moveCursorToPos(t.pos)
}
func (t *Terminal) SetSize(width, height int) error {
t.lock.Lock()
defer t.lock.Unlock()
if width == 0 {
width = 1
}
oldWidth := t.termWidth
t.termWidth, t.termHeight = width, height
switch {
case width == oldWidth:
// If the width didn't change then nothing else needs to be
// done.
return nil
case len(t.line) == 0 && t.cursorX == 0 && t.cursorY == 0:
// If there is nothing on current line and no prompt printed,
// just do nothing
return nil
case width < oldWidth:
// Some terminals (e.g. xterm) will truncate lines that were
// too long when shrinking. Others (e.g. gnome-terminal) will
// attempt to wrap them. For the former, repainting t.maxLine
// works great, but that behaviour goes badly wrong in the case
// of the latter because they have doubled every full line.
// We assume that we are working on a terminal that wraps lines
// and adjust the cursor position based on every previous line
// wrapping and turning into two. This causes the prompt on
// xterms to move upwards, which isn't great, but it avoids a
// huge mess with gnome-terminal.
if t.cursorX >= t.termWidth {
t.cursorX = t.termWidth - 1
}
t.cursorY *= 2
t.clearAndRepaintLinePlusNPrevious(t.maxLine * 2)
case width > oldWidth:
// If the terminal expands then our position calculations will
// be wrong in the future because we think the cursor is
// |t.pos| chars into the string, but there will be a gap at
// the end of any wrapped line.
//
// But the position will actually be correct until we move, so
// we can move back to the beginning and repaint everything.
t.clearAndRepaintLinePlusNPrevious(t.maxLine)
}
_, err := t.c.Write(t.outBuf)
t.outBuf = t.outBuf[:0]
return err
}
type pasteIndicatorError struct{}
func (pasteIndicatorError) Error() string {
return "terminal: ErrPasteIndicator not correctly handled"
}
// ErrPasteIndicator may be returned from ReadLine as the error, in addition
// to valid line data. It indicates that bracketed paste mode is enabled and
// that the returned line consists only of pasted data. Programs may wish to
// interpret pasted data more literally than typed data.
var ErrPasteIndicator = pasteIndicatorError{}
// SetBracketedPasteMode requests that the terminal bracket paste operations
// with markers. Not all terminals support this but, if it is supported, then
// enabling this mode will stop any autocomplete callback from running due to
// pastes. Additionally, any lines that are completely pasted will be returned
// from ReadLine with the error set to ErrPasteIndicator.
func (t *Terminal) SetBracketedPasteMode(on bool) {
if on {
io.WriteString(t.c, "\x1b[?2004h")
} else {
io.WriteString(t.c, "\x1b[?2004l")
}
}
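With bracketed paste enabled, a line that arrived entirely via paste is returned from ReadLine together with ErrPasteIndicator rather than a real failure. A sketch of handling that case, assuming an already-constructed *terminal.Terminal; readOne and the package name are illustrative:

package termexample

import "golang.org/x/crypto/ssh/terminal"

// readOne reads a single line and reports whether it was pasted wholesale.
func readOne(t *terminal.Terminal) (line string, pasted bool, err error) {
	t.SetBracketedPasteMode(true)
	defer t.SetBracketedPasteMode(false)

	line, err = t.ReadLine()
	if err == terminal.ErrPasteIndicator {
		// The line data is still valid; the "error" is only an annotation.
		return line, true, nil
	}
	return line, false, err
}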
// stRingBuffer is a ring buffer of strings.
type stRingBuffer struct {
// entries contains max elements.
entries []string
max int
// head contains the index of the element most recently added to the ring.
head int
// size contains the number of elements in the ring.
size int
}
func (s *stRingBuffer) Add(a string) {
if s.entries == nil {
const defaultNumEntries = 100
s.entries = make([]string, defaultNumEntries)
s.max = defaultNumEntries
}
s.head = (s.head + 1) % s.max
s.entries[s.head] = a
if s.size < s.max {
s.size++
}
}
// NthPreviousEntry returns the value passed to the nth previous call to Add.
// If n is zero then the immediately prior value is returned, if one, then the
// next most recent, and so on. If such an element doesn't exist then ok is
// false.
func (s *stRingBuffer) NthPreviousEntry(n int) (value string, ok bool) {
if n >= s.size {
return "", false
}
index := s.head - n
if index < 0 {
index += s.max
}
return s.entries[index], true
}
// readPasswordLine reads from reader until it finds \n or io.EOF.
// The slice returned does not include the \n.
// readPasswordLine also ignores any \r it finds.
func readPasswordLine(reader io.Reader) ([]byte, error) {
var buf [1]byte
var ret []byte
for {
n, err := reader.Read(buf[:])
if n > 0 {
switch buf[0] {
case '\n':
return ret, nil
case '\r':
// remove \r from passwords on Windows
default:
ret = append(ret, buf[0])
}
continue
}
if err != nil {
if err == io.EOF && len(ret) > 0 {
return ret, nil
}
return ret, err
}
}
}
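readPasswordLine is the low-level loop behind the package-level ReadPassword(fd) helper defined in this package's util files (not shown in this diff). The usual prompt-for-a-password idiom with that exported helper, sketched with illustrative output formatting:

package main

import (
	"fmt"
	"os"

	"golang.org/x/crypto/ssh/terminal"
)

func main() {
	fmt.Fprint(os.Stderr, "Password: ")
	pw, err := terminal.ReadPassword(int(os.Stdin.Fd()))
	fmt.Fprintln(os.Stderr) // echo is off, so supply the newline ourselves
	if err != nil {
		panic(err)
	}
	fmt.Fprintf(os.Stderr, "read %d bytes\n", len(pw))
}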

Some files were not shown because too many files have changed in this diff